| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
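The slow test above follows the standard integration-test recipe: load a pretrained checkpoint, preprocess one image, run a forward pass, and compare a slice of the logits against reference values within a tolerance. A minimal standalone sketch of the same flow — the "microsoft/resnet-50" checkpoint and the image path are assumptions for illustration, not taken from this file:

# Hypothetical standalone version of the inference pattern tested above.
import numpy as np
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")

logits = model(**inputs).logits  # shape (1, 1000) for an ImageNet-1k head
print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted class id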
| 41 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
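The shim above is the usual "import from the old path, warn, forward to the new one" pattern. A generic sketch of the same idea using only the standard library (the class and message are invented for the example; diffusers' own `deprecate` helper adds version gating on top of this):

import warnings


class NewPipeline:  # stand-in for the class that moved
    pass


def __getattr__(name):  # PEP 562: called when a module attribute lookup fails
    if name == "OldPipeline":
        warnings.warn(
            "OldPipeline moved; import NewPipeline from its new module instead.",
            FutureWarning,
            stacklevel=2,
        )
        return NewPipeline
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")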
| 261 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase : Optional[Any] = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=lowerCAmelCase_ , image_encoder=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , renderer=lowerCAmelCase_ , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if latents is None:
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_snake_case = latents.to(lowerCAmelCase_ )
_snake_case = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase ( self , lowerCAmelCase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_snake_case = torch.device(F'cuda:{gpu_id}' )
_snake_case = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCAmelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(image[0] , torch.Tensor ):
_snake_case = torch.cat(lowerCAmelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase_ , axis=0 )
if not isinstance(lowerCAmelCase_ , torch.Tensor ):
_snake_case = self.image_processor(lowerCAmelCase_ , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
_snake_case = image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase_ )
_snake_case = self.image_encoder(lowerCAmelCase_ )['last_hidden_state']
_snake_case = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_snake_case = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_snake_case = torch.zeros_like(lowerCAmelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_snake_case = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 25 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 4.0 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_snake_case = 1
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
_snake_case = image.shape[0]
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_snake_case = len(lowerCAmelCase_ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase_ )}' )
_snake_case = self._execution_device
_snake_case = batch_size * num_images_per_prompt
_snake_case = guidance_scale > 1.0
_snake_case = self._encode_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# prior
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = self.scheduler.timesteps
_snake_case = self.prior.config.num_embeddings
_snake_case = self.prior.config.embedding_dim
_snake_case = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_snake_case = latents.reshape(latents.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.prior(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , proj_embedding=lowerCAmelCase_ , ).predicted_image_embedding
# remove the variance
_snake_case , _snake_case = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_snake_case = self.scheduler.step(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase_ )
_snake_case = []
for i, latent in enumerate(lowerCAmelCase_ ):
_snake_case = self.renderer.decode(
latent[None, :] , lowerCAmelCase_ , size=lowerCAmelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(lowerCAmelCase_ )
_snake_case = torch.stack(lowerCAmelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_snake_case = images.cpu().numpy()
if output_type == "pil":
_snake_case = [self.numpy_to_pil(lowerCAmelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase_ )
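The heart of the denoising loop above is classifier-free guidance: the batch is doubled into [unconditional, conditional] halves, and the guided prediction extrapolates from the unconditional half toward the conditional one. A self-contained sketch of that update (tensor shapes are illustrative):

import torch


def apply_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # first half of the batch is the unconditional prediction, second half the conditional one
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


pred = torch.randn(4, 1024, 64)  # batch of 2, duplicated for guidance
print(apply_guidance(pred, 3.0).shape)  # torch.Size([2, 1024, 64])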
| 42 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
adapter.proj_layer_norm.bias.data = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
adapter.proj.bias.data = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
adapter.proj.weight.data = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(layer_id, int):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
adapter.layers[layer_id].conv.bias.data = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
adapter.layers[layer_id].conv.weight.data = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(full_name)
def make_linear_from_emb(emb):
    # build an LM head from an embedding matrix (helper not referenced in this excerpt)
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
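The MAPPING table above uses "*" as a layer-index wildcard: the index is parsed out of the fairseq key and substituted into the transformers key. A minimal sketch of that resolution step, using one entry from the table:

# Isolated sketch of the wildcard substitution performed in
# recursively_load_weights_wav2vec2 above.
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def map_key(name):
    for key, mapped_key in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]  # e.g. "3"
            return mapped_key.replace("*", layer_index)
    return None


print(map_key("encoder.layers.3.self_attn.k_proj.weight"))
# -> encoder.layers.3.attention.k_proj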
| 261 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
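The staleness rule the script applies can be isolated into a small predicate: an issue must be at least 30 days old, and quiet for more than 7 days after a bot ping or more than 23 days otherwise. A sketch of that logic on its own (thresholds copied from the script above):

from datetime import datetime


def is_stale(created_at, updated_at, bot_commented_last, now=None):
    now = now or datetime.utcnow()
    days_since_updated = (now - updated_at).days
    days_since_creation = (now - created_at).days
    threshold = 7 if bot_commented_last else 23
    return days_since_creation >= 30 and days_since_updated > threshold


print(is_stale(datetime(2023, 1, 1), datetime(2023, 2, 1), bot_commented_last=False))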
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_trocr"] = [
    "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
    "TrOCRForCausalLM",
    "TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
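_LazyModule relies on PEP 562 module-level attribute hooks so that heavy submodules are only imported when one of their exported names is first touched. A minimal sketch of what it automates (here the stdlib "json" module stands in for a real submodule):

import importlib

_import_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    # import the owning module on first access to one of its exported names
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")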
| 261 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour 2x upsampling in both spatial dimensions, then a 3x3 conv
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the timestep embedding and broadcast it over the spatial dimensions
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
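A usage sketch for the residual block as restored above (the class names are the ones reconstructed in this file; inputs are NHWC, as flax convolutions expect):

import jax
import jax.numpy as jnp

block = FlaxResnetBlock2D(in_channels=32, out_channels=64, dropout_prob=0.0)
x = jnp.ones((1, 16, 16, 32))  # (batch, height, width, channels)
temb = jnp.ones((1, 128))      # timestep embedding
params = block.init(jax.random.PRNGKey(0), x, temb)
y = block.apply(params, x, temb)  # deterministic=True by default
print(y.shape)                    # (1, 16, 16, 64)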
| 44 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
params.global_rank = int(os.environ["RANK"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
params.multi_gpu = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
# summary
PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def set_seed(args):
    """Set seeds for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
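The rank bookkeeping in init_gpu_params reduces to simple integer arithmetic between world size, GPUs per node, node id, and ranks. The relationships, spelled out on illustrative values:

# global_rank enumerates all processes; node_id and local_rank are derived.
world_size, n_gpu_per_node = 8, 4
n_nodes = world_size // n_gpu_per_node
for global_rank in range(world_size):
    node_id = global_rank // n_gpu_per_node
    local_rank = global_rank % n_gpu_per_node
    print(f"global {global_rank} -> node {node_id}/{n_nodes}, local {local_rank}")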
| 261 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'beit'
def __init__( self , _a=8_192 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=False , _a=False , _a=False , _a=False , _a=0.1 , _a=0.1 , _a=True , _a=[3, 5, 7, 11] , _a=[1, 2, 3, 6] , _a=True , _a=0.4 , _a=256 , _a=1 , _a=False , _a=255 , **_a , ):
super().__init__(**_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = use_mask_token
__a = use_absolute_position_embeddings
__a = use_relative_position_bias
__a = use_shared_relative_position_bias
__a = layer_scale_init_value
__a = drop_path_rate
__a = use_mean_pooling
# decode head attributes (semantic segmentation)
__a = out_indices
__a = pool_scales
# auxiliary head attributes (semantic segmentation)
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = semantic_loss_ignore_index
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
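A usage sketch for the config above: instantiate it with defaults and inspect the ONNX axes the companion OnnxConfig declares (class names as they appear in the transformers source; treat the exact import path as an assumption):

from transformers import BeitConfig
from transformers.models.beit.configuration_beit import BeitOnnxConfig

config = BeitConfig()  # hidden_size=768, num_hidden_layers=12, ...
onnx_config = BeitOnnxConfig(config)
print(onnx_config.inputs)               # pixel_values: batch/num_channels/height/width axes
print(onnx_config.atol_for_validation)  # 1e-04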
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_van"] = [
    "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
    "VanForImageClassification",
    "VanModel",
    "VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 261 | 0 |
"""simple docstring"""
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode `data` using the Base64 charset defined above."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode Base64-encoded data back to the original bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)

    data = [
        int(binary_stream[index : index + 8], 2) for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
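A quick round-trip check of the two functions as restored above, against the standard library implementation:

import base64

payload = b"Hello, World!"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload
print("round trip ok")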
| 46 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
def __str__( self ):
__a = F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(lowerCamelCase , len(str(lowerCamelCase ) ) )
__a = F"%{max_element_length}s"
# Make string and return
def single_line(row_vector) -> str:
nonlocal string_format_identifier
__a = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def a__ ( self , lowerCamelCase ):
if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , lowerCamelCase ):
assert self.validate_indicies(lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , lowerCamelCase , lowerCamelCase ):
assert self.validate_indicies(lowerCamelCase )
__a = value
def __add__( self , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ):
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , lowerCamelCase ):
return self + (-another)
def __mul__( self , lowerCamelCase ):
if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
msg = f"Unsupported type given for another ({type(another)})"
raise TypeError(msg)
def a__ ( self ):
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _lowerCamelCase( ):
# a^(-1)
__a = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a = 1
print(F"a^(-1) is {ainv}" )
# u, v
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 1, 2, -3
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(a , a )}" )
def _lowerCamelCase( ):
import doctest
doctest.testmod()
testa()
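The update implemented by sherman_morrison above is the Sherman–Morrison identity: given ainv = A^(-1), (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), matching the "+ 1" denominator in the code. A quick numerical check of that identity with numpy:

import numpy as np

rng = np.random.default_rng(0)
A = np.eye(3) + 0.1 * rng.normal(size=(3, 3))
u = rng.normal(size=(3, 1))
v = rng.normal(size=(3, 1))

lhs = np.linalg.inv(A + u @ v.T)
Ainv = np.linalg.inv(A)
rhs = Ainv - (Ainv @ u @ v.T @ Ainv) / (1.0 + (v.T @ Ainv @ u).item())
print(np.allclose(lhs, rhs))  # True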
| 261 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Dict="ybelkada/segment-anything" ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =hf_hub_download(_UpperCamelCase , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
_SCREAMING_SNAKE_CASE =SamConfig()
elif "sam_vit_l" in model_name:
_SCREAMING_SNAKE_CASE =SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_SCREAMING_SNAKE_CASE =SamConfig(
vision_config=_UpperCamelCase , )
elif "sam_vit_h" in model_name:
_SCREAMING_SNAKE_CASE =SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_SCREAMING_SNAKE_CASE =SamConfig(
vision_config=_UpperCamelCase , )
_SCREAMING_SNAKE_CASE =torch.load(_UpperCamelCase , map_location='cpu' )
_SCREAMING_SNAKE_CASE =replace_keys(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =SamImageProcessor()
_SCREAMING_SNAKE_CASE =SamProcessor(image_processor=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =SamModel(_UpperCamelCase )
hf_model.load_state_dict(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =hf_model.to('cuda' )
_SCREAMING_SNAKE_CASE ='https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' )
_SCREAMING_SNAKE_CASE =[[[4_00, 6_50]]]
_SCREAMING_SNAKE_CASE =[[1]]
_SCREAMING_SNAKE_CASE =processor(images=np.array(_UpperCamelCase ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**_UpperCamelCase )
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_SCREAMING_SNAKE_CASE =processor(
images=np.array(_UpperCamelCase ) , input_points=_UpperCamelCase , input_labels=_UpperCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**_UpperCamelCase )
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_SCREAMING_SNAKE_CASE =((75, 2_75, 17_25, 8_50),)
_SCREAMING_SNAKE_CASE =processor(images=np.array(_UpperCamelCase ) , input_boxes=_UpperCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**_UpperCamelCase )
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_SCREAMING_SNAKE_CASE =[[[4_00, 6_50], [8_00, 6_50]]]
_SCREAMING_SNAKE_CASE =[[1, 1]]
_SCREAMING_SNAKE_CASE =processor(
images=np.array(_UpperCamelCase ) , input_points=_UpperCamelCase , input_labels=_UpperCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =hf_model(**_UpperCamelCase )
_SCREAMING_SNAKE_CASE =output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
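The layer renumbering handled by the regex in replace_keys above, isolated: inside output_hypernetworks_mlps, layer 0 becomes proj_in, layer 2 becomes proj_out, and layer 1 shifts down to layers.0. A small sketch of that rule:

import re

pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
key = "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"

layer_nb = int(re.match(pattern, key).group(2))
renames = {0: ("layers.0", "proj_in"), 1: ("layers.1", "layers.0"), 2: ("layers.2", "proj_out")}
old, new = renames[layer_nb]
print(key.replace(old, new))
# -> mask_decoder.output_hypernetworks_mlps.0.proj_out.weight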
| 47 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ):
super().__init__()
__a = Path(lowerCamelCase ).joinpath(type_path + ".source" )
__a = Path(lowerCamelCase ).joinpath(type_path + ".target" )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , lowerCamelCase ):
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" )
__a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__a = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
)
__a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" )
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" )
__a = source_inputs["input_ids"].squeeze()
__a = target_inputs["input_ids"].squeeze()
__a = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( lowerCamelCase ):
return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()]
def a__ ( self , lowerCamelCase ):
__a = torch.stack([x["input_ids"] for x in batch] )
__a = torch.stack([x["attention_mask"] for x in batch] )
__a = torch.stack([x["decoder_input_ids"] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = trim_batch(lowerCamelCase , lowerCamelCase )
__a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase )
__a = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)
def _lowerCamelCase( a ):
return list(itertools.chain.from_iterable(a ) )
def _lowerCamelCase( a ):
__a = get_git_info()
save_json(a , os.path.join(a , "git_log.json" ) )
def _lowerCamelCase( a , a , a=4 , **a ):
with open(a , "w" ) as f:
json.dump(a , a , indent=a , **a )
def _lowerCamelCase( a ):
with open(a ) as f:
return json.load(a )
def _lowerCamelCase( ):
__a = git.Repo(search_parent_directories=a )
__a = {
"repo_id": str(a ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def _lowerCamelCase( a , a ):
return list(map(a , a ) )
def _lowerCamelCase( a , a ):
with open(a , "wb" ) as f:
return pickle.dump(a , a )
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
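# Worked example for the token-level F1 above: "new york city" vs "york city"
# shares 2 tokens, so precision = 2/3, recall = 2/2 = 1, and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.
assert abs(f1_score("new york city", "york city") - 0.8) < 1e-9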
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
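# Minimal demonstration (toy objects, not from the source) of the remapping above:
# a `dropout` hyperparameter is moved onto a config that only knows `dropout_rate`.
from argparse import Namespace

class _ToyConfig:
    dropout_rate = 0.0

_hparams, _cfg = set_extra_model_params(["dropout"], Namespace(dropout=0.3), _ToyConfig())
assert _cfg.dropout_rate == 0.3 and not hasattr(_hparams, "dropout")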
| 261 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 48 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput ):
    latent_dist: "DiagonalGaussianDistribution"

class AutoencoderKL(ModelMixin , ConfigMixin ):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18215 , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
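    # Illustrative arithmetic for the tile sizes above (assumed config values):
    # with sample_size=512 and four block_out_channels entries, the latent tile is
    # 512 / 2**3 = 64, and tile_overlap_factor=0.25 gives a 16-position latent overlap.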
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value

    def enable_tiling( self , use_tiling = True ):
        self.use_tiling = use_tiling

    def disable_tiling( self ):
        self.enable_tiling(False )

    def enable_slicing( self ):
        self.use_slicing = True

    def disable_slicing( self ):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ):
        processors = {}

        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , "set_processor" ):
                processors[F"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}" , child , processors )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors

    def set_attn_processor( self , processor ):
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(processor )} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )

        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , "set_processor" ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F"{name}.processor" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}" , child , processor )

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )

    def set_default_attn_processor( self ):
        self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x , return_dict = True ):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z , return_dict = True ):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z , return_dict = True ):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h( self , a , b , blend_extent ):
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
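    # Worked example of the blend above (illustrative numbers): with blend_extent=2,
    # a left tile of ones and a right tile of zeros, column 0 of the result is
    # 1.0 * (1 - 0/2) + 0.0 = 1.0 and column 1 is 1.0 * (1 - 1/2) + 0.0 = 0.5,
    # giving a linear cross-fade across the seam.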
    def tiled_encode( self , x , return_dict = True ):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor )
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size ):
            row = []
            for j in range(0 , x.shape[3] , overlap_size ):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile )
                tile = self.quant_conv(tile )
                row.append(tile )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        moments = torch.cat(result_rows , dim=2 )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
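    # Illustrative stride arithmetic for the tiled encode above: with
    # tile_sample_min_size=512 and tile_overlap_factor=0.25, tiles are taken every
    # int(512 * 0.75) = 384 pixels, so neighbouring tiles overlap by 128 pixels
    # before the blend functions smooth the seam.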
    def tiled_decode( self , z , return_dict = True ):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor )
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size ):
            row = []
            for j in range(0 , z.shape[3] , overlap_size ):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile )
                decoded = self.decoder(tile )
                row.append(decoded )
            rows.append(row )
        result_rows = []
        for i, row in enumerate(rows ):
            result_row = []
            for j, tile in enumerate(row ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent )
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(result_row , dim=3 ) )
        dec = torch.cat(result_rows , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , sample_posterior = False , return_dict = True , generator = None , ):
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
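# Minimal round-trip sketch for the class above (assumed small shapes and config
# values; an illustration, not a test from the source).
import torch

_vae = AutoencoderKL(block_out_channels=(32,), latent_channels=4, norm_num_groups=32, sample_size=32)
_image = torch.randn(1, 3, 32, 32)
_reconstruction = _vae(_image, sample_posterior=True).sample
assert _reconstruction.shape == _image.shape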
| 261 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
_import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
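# Hedged standalone sketch (not the transformers helper itself) of the lazy-import
# pattern used above: attribute access triggers the real submodule import on first use.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")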
| 49 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin ):
    def __init__( self , feature_size , sampling_rate , padding_value , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )
        super().__init__(**kwargs )
    def pad( self , processed_features , padding = True , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , ):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                F" to this method that includes {self.model_input_names[0]}, but you provided"
                F" {list(processed_features.keys() )}" )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    F"type of {first_element} unknown: {type(first_element )}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object." )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )
        if needs_to_be_padded:
            difference = max_length - len(required_input )
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return processed_features
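    # Worked example of the right-padding branch above (illustrative numbers): a
    # length-3 feature padded to max_length=5 gives difference=2, so the attention
    # mask becomes [1, 1, 1, 0, 0] and two padding_value entries are appended on
    # the right of the feature array.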
    def _truncate( self , processed_features , max_length = None , pad_to_multiple_of = None , truncation = None , ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input ) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
        return padding_strategy
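# Worked check (illustrative numbers) of the pad_to_multiple_of rounding used in
# `_pad` and `_truncate` above: a max_length of 50 with pad_to_multiple_of=8
# rounds up to the next multiple of 8.
assert ((50 // 8) + 1) * 8 == 56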
| 261 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig ):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
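# Hedged standalone sketch (toy helper, not the library API) of the serialization
# pattern in to_dict above: nested backbone configs are recursively converted to
# plain dicts so the whole config round-trips through JSON.
import copy

def _config_to_dict_sketch(config):
    output = copy.deepcopy(config.__dict__)
    if getattr(config, "backbone_config", None) is not None:
        output["backbone_config"] = config.backbone_config.to_dict()
    output["model_type"] = config.__class__.model_type
    return output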
| 50 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
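# Quick checks of the two implementations above (illustrative inputs): "Momo"
# has only even character counts after lowercasing, while "abc" has three odd ones.
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
assert can_string_be_rearranged_as_palindrome("abc") is False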
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    model_type = '''bert'''

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ])
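# Illustrative restatement of the property above for a sequence-classification
# task (a plain check, not additional API):
from collections import OrderedDict

_dynamic_axis = {0: "batch", 1: "sequence"}
_expected = OrderedDict(
    [("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis), ("token_type_ids", _dynamic_axis)]
)
assert list(_expected) == ["input_ids", "attention_mask", "token_type_ids"]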
| 51 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )

    def test_call( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    @require_torch
    def test_double_precision_pad( self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 261 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase ):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def test_small_model_pt( self ):
'''simple docstring'''
UpperCamelCase : Dict = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
UpperCamelCase : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
UpperCamelCase : Optional[Any] = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
UpperCamelCase : str = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(A_ ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
UpperCamelCase : str = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
UpperCamelCase : str = text_classifier("This is great !" , return_all_scores=A_ )
self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
UpperCamelCase : Any = text_classifier("This is great !" , return_all_scores=A_ )
self.assertEqual(
nested_simplify(A_ ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
UpperCamelCase : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=A_ )
self.assertEqual(
nested_simplify(A_ ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
UpperCamelCase : Union[str, Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=A_ )
self.assertEqual(
nested_simplify(A_ ) , [
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] , )
@require_torch
def test_accepts_torch_device( self ):
'''simple docstring'''
import torch
UpperCamelCase : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
UpperCamelCase : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def test_small_model_tf( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
UpperCamelCase : Tuple = text_classifier("This is great !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def test_pt_bert( self ):
'''simple docstring'''
UpperCamelCase : List[str] = pipeline("text-classification" )
UpperCamelCase : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCamelCase : Dict = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCamelCase : List[Any] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def test_tf_bert( self ):
'''simple docstring'''
UpperCamelCase : int = pipeline("text-classification" , framework="tf" )
UpperCamelCase : Dict = text_classifier("This is great !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCamelCase : Dict = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCamelCase : Optional[int] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(A_ ) , [{"label": "POSITIVE", "score": 0.9_88}] )
def get_test_pipeline( self , model , tokenizer , processor ):
'''simple docstring'''
text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
return text_classifier, ["HuggingFace is in", "This is another test"]
def run_pipeline_test( self , text_classifier , _ ):
'''simple docstring'''
model = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCamelCase : Optional[int] = "HuggingFace is in"
UpperCamelCase : List[Any] = text_classifier(A_ )
self.assertEqual(nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
UpperCamelCase : Union[str, Any] = ["HuggingFace is in ", "Paris is in France"]
UpperCamelCase : Optional[int] = text_classifier(A_ )
self.assertEqual(
nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}, {"label": ANY(A_ ), "score": ANY(A_ )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCamelCase : List[str] = text_classifier(A_ , top_k=A_ )
N = len(model.config.id2label.values() )
self.assertEqual(
nested_simplify(A_ ) , [[{"label": ANY(A_ ), "score": ANY(A_ )}] * N, [{"label": ANY(A_ ), "score": ANY(A_ )}] * N] , )
UpperCamelCase : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
UpperCamelCase : List[Any] = text_classifier(A_ )
self.assertEqual(
nested_simplify(A_ ) , {"label": ANY(A_ ), "score": ANY(A_ )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
UpperCamelCase : int = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(A_ ):
text_classifier(A_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCamelCase : Any = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(A_ ) , [{"label": ANY(A_ ), "score": ANY(A_ )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 52 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )

    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
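# Quick sanity sketch for the normalizer above (toy values; assumes the diffusers
# mixins are importable): scale followed by unscale is the identity.
import torch

_normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
_embeds = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
assert torch.allclose(_normalizer.unscale(_normalizer.scale(_embeds)), _embeds)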
| 261 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = GPTSanJapaneseTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def setUp( self ):
super().setUp()
# fmt: off
vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
self.special_tokens_map = {'unk_token': '<unk>'}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(emoji_tokens ) )
def get_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def get_clean_sequence( self , tokenizer ):
input_text , output_text = self.get_input_output_texts(tokenizer )
ids = tokenizer.encode(output_text , add_special_tokens=False )
text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def _lowerCamelCase ( self : int ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Optional[int] ):
pass # TODO add if relevant
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.get_tokenizer()
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。 こんばんは、㔺界。'
__UpperCamelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
__UpperCamelCase = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
__UpperCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__UpperCamelCase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
__UpperCamelCase = tokens + [tokenizer.unk_token]
__UpperCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__UpperCamelCase = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.get_tokenizer()
# Testing tokenization
__UpperCamelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
__UpperCamelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
__UpperCamelCase = tokenizer.encode(__A )
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。'
__UpperCamelCase = 'こんばんは、㔺界。😀'
__UpperCamelCase = 'こんにちは、世界。こんばんは、世界。😀'
__UpperCamelCase = tokenizer.encode(prefix_text + input_text )
__UpperCamelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text )
__UpperCamelCase = tokenizer.encode(__A , prefix_text=__A )
__UpperCamelCase = tokenizer.decode(__A )
__UpperCamelCase = tokenizer.decode(__A )
__UpperCamelCase = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__UpperCamelCase = 'こんにちは、世界。'
__UpperCamelCase = 'こんばんは、㔺界。😀'
__UpperCamelCase = len(tokenizer.encode(__A ) ) - 2
__UpperCamelCase = len(tokenizer.encode(__A ) ) - 2
__UpperCamelCase = [1] + [0] * (len_prefix + len_text + 1)
__UpperCamelCase = [1] * (len_prefix + len_text + 1) + [0]
__UpperCamelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__UpperCamelCase = tokenizer(prefix_text + input_text ).token_type_ids
__UpperCamelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__UpperCamelCase = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__UpperCamelCase = tokenizer.encode('あンいワ' )
__UpperCamelCase = tokenizer.encode('' , prefix_text='あンいワ' )
__UpperCamelCase = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__UpperCamelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
__UpperCamelCase = tokenizer(__A , padding=__A )
__UpperCamelCase = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
__UpperCamelCase = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__UpperCamelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__UpperCamelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def _lowerCamelCase ( self : str ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _lowerCamelCase ( self : str ):
# tokenizer has no padding token
pass
| 53 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_longt5"""] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_longt5"""] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model ):
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
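# Minimal wiring sketch (assumed output directory and metric; illustration only):
# the two factory helpers above plug straight into a pytorch_lightning Trainer.
# trainer = pl.Trainer(
#     callbacks=[
#         get_checkpoint_callback("outputs", "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#     ]
# )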
class Seq2SeqLoggingCallback(pl.Callback):
"""simple docstring"""
def on_batch_end( self , trainer , pl_module ):
lrs = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
od = Path(pl_module.hparams.output_dir )
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=True )
generations_file.parent.mkdir(exist_ok=True )
with open(results_file , "a+" ) as writer:
for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val , torch.Tensor ):
val = val.item()
msg = F"""{key}: {val:.6f}\n"""
writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(content )
@rank_zero_only
def on_train_start( self , trainer , pl_module ):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def on_test_end( self , trainer , pl_module ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
def on_validation_end( self , trainer , pl_module ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 54 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
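# Example invocation (script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_encoder_only   # pass only for encoder-only checkpoints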
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : Union[str, Any] = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
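# Consumer-side sketch of the lazy module above: the symbol table is registered up
# front, and the heavy submodule is only imported when one of its attributes is first
# accessed (assuming torch is installed, so the torch branch applies):
#   from transformers import XLMConfig, XLMModel   # attribute access triggers the real import
#   model = XLMModel(XLMConfig())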
| 55 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 261 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : Optional[int] = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__UpperCAmelCase, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__UpperCAmelCase, (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__UpperCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class a ( BaseImageProcessor ):
snake_case_ = ["pixel_values"]
def __init__( self : Union[str, Any] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Dict , ):
super().__init__(**lowercase_ )
snake_case_ = size if size is not None else {'''shortest_edge''': 224}
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
snake_case_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
snake_case_ = get_size_dict(lowercase_ , param_name='''crop_size''' )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = resample
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" in size:
snake_case_ = get_resize_output_image_size(lowercase_ , size['''shortest_edge'''] , default_to_square=lowercase_ )
elif "height" in size and "width" in size:
snake_case_ = (size['''height'''], size['''width'''])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ):
snake_case_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowercase_ , size=(size['''height'''], size['''width''']) , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : List[Any] , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ):
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def A_ ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
snake_case_ = to_numpy_array(lowercase_ )
if do_resize:
snake_case_ = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ )
if do_center_crop:
snake_case_ = self.center_crop(lowercase_ , size=lowercase_ )
if do_rescale:
snake_case_ = self.rescale(image=lowercase_ , scale=lowercase_ )
if do_normalize:
snake_case_ = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ )
snake_case_ = to_channel_dimension_format(lowercase_ , lowercase_ )
return image
def A_ ( self : Tuple , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Any , ):
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowercase_ , param_name='''crop_size''' )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
snake_case_ = make_batched(lowercase_ )
snake_case_ = [
[
self._preprocess_image(
image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , )
for img in video
]
for video in videos
]
snake_case_ = {'''pixel_values''': videos}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
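# Minimal usage sketch. The concrete processor name is an assumption (the class above
# matches transformers' video processors such as VideoMAEImageProcessor), and the
# obfuscated method names keep it from running against this file as-is:
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 8, 3, 224, 224) after resize, crop, normalize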
| 56 | """simple docstring"""
import random
def _lowerCamelCase( a , a , a ):
__a = a[left_index]
__a = left_index + 1
for j in range(left_index + 1 , right_index ):
if a[j] < pivot:
__a , __a = a[i], a[j]
i += 1
__a , __a = a[i - 1], a[left_index]
return i - 1
def _lowerCamelCase( a , a , a ):
if left < right:
__a = random.randint(left , right - 1 )
__a , __a = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__a = partition(a , left , right )
quick_sort_random(
a , left , pivot_index ) # recursive quicksort to the left of the pivot point
quick_sort_random(
a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase( ):
__a = input("Enter numbers separated by a comma:\n" ).strip()
__a = [int(item ) for item in user_input.split("," )]
quick_sort_random(arr , 0 , len(arr ) )
print(arr )
if __name__ == "__main__":
main()
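# Worked example: with the pivot 2 sitting at the left bound, partition([2, 4, 1, 3], 0, 4)
# rearranges the list to [1, 2, 4, 3] and returns 1, the pivot's final index; the
# recursion then sorts the [1] and [4, 3] halves independently.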
| 261 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCamelCase ( TestCase ):
'''simple docstring'''
def snake_case ( self , __a ):
with open(__a , encoding="utf-8" ) as input_file:
__lowerCAmelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__lowerCAmelCase = input_file.read()
__lowerCAmelCase = regexp.search(__a )
return match
def snake_case ( self , __a ):
with open(__a , encoding="utf-8" ) as input_file:
__lowerCAmelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
__lowerCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowerCAmelCase = regexp.finditer(__a )
__lowerCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def snake_case ( self ):
__lowerCAmelCase = Path("./datasets" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__a ) ):
raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
def snake_case ( self ):
__lowerCAmelCase = Path("./datasets" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__a ) ):
raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 57 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(my , fun , *args )
__a , __a = _run_operation(py , fun , *args )
assert my_res == py_res
assert str(my ) == str(py )
assert set(my ) == set(py )
assert len(my ) == len(py )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(name ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(name )}
__a = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( __lowerCamelCase : int ) ->list[int]:
if num <= 0:
raise ValueError(F'{num}: Invalid input, please enter a positive integer.' )
_SCREAMING_SNAKE_CASE = [True] * (num + 1)
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = int(math.sqrt(__lowerCamelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(start )
# Set multiples of start be False
for i in range(start * start , num + 1 , start ):
if sieve[i] is True:
sieve[i] = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(j )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
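# Worked example: prime_sieve(30) strikes out multiples of each prime up to
# int(sqrt(30)) == 5 and returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].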
| 58 | """simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
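# Minimal usage sketch. The internal references to `TrialShortNamer` above give the
# original class name; the subclass below and the `shortname`/`parse_repr` method names
# are assumptions taken from that API:
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})
#   # -> "run_bs64": only non-default values are encoded, under generated short keys
#   RunNamer.parse_repr("run_bs64")
#   # -> {"batch_size": 64.0, "learning_rate": 0.001}: defaults are filled back in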
| 261 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCamelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
for attribute in key.split("." ):
snake_case : int = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : Union[str, Any] = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : Optional[Any] = value
elif weight_type == "weight_g":
snake_case : str = value
elif weight_type == "weight_v":
snake_case : Dict = value
elif weight_type == "bias":
snake_case : Dict = value
else:
snake_case : Tuple = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Any ):
snake_case : List[Any] = []
snake_case : List[Any] = fairseq_model.state_dict()
snake_case : str = hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case : Tuple = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
snake_case : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
snake_case : Dict = True
if "*" in mapped_key:
snake_case : Union[str, Any] = name.split(key )[0].split("." )[-2]
snake_case : Dict = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : int = "weight_g"
elif "weight_v" in name:
snake_case : Dict = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
snake_case : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case : List[Any] = "weight"
else:
snake_case : List[str] = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Any ):
snake_case : Dict = full_name.split("conv_layers." )[-1]
snake_case : List[Any] = name.split("." )
snake_case : Tuple = int(items[0] )
snake_case : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=None ):
# load the pre-trained checkpoints
snake_case : Optional[int] = torch.load(__lowerCamelCase )
snake_case : List[Any] = WavLMConfigOrig(checkpoint["cfg"] )
snake_case : Optional[int] = WavLMOrig(__lowerCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
snake_case : str = WavLMConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : Optional[int] = WavLMConfig()
snake_case : Any = WavLMModel(__lowerCamelCase )
recursively_load_weights(__lowerCamelCase , __lowerCamelCase )
hf_wavlm.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCamelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
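# Example invocation (script name and paths are placeholders):
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json   # optional; omitted -> default WavLMConfig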
| 59 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
class snake_case__ ( PretrainedConfig ):
_snake_case : Optional[int] = """upernet"""
def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = backbone_config.get("model_type" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(lowerCamelCase )
__a = backbone_config
__a = hidden_size
__a = initializer_range
__a = pool_scales
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_in_channels
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = loss_ignore_index
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
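# Minimal usage sketch (the model_type field identifies this as transformers'
# UperNetConfig; that original class name is assumed below):
#   config = UperNetConfig()   # no backbone_config -> defaults to a ResNet backbone
#   config.to_dict()           # serializes the nested backbone_config and model_type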
| 261 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case__ : List[Any] = logging.get_logger(__name__)
def _snake_case ( _snake_case : Tuple ):
if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_snake_case ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class snake_case_( BaseImageProcessor ):
__UpperCamelCase = ['''pixel_values''']
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : Tuple , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
lowerCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase : Dict = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase : Any = do_resize
lowerCAmelCase : Union[str, Any] = size
lowerCAmelCase : List[str] = do_center_crop
lowerCAmelCase : int = crop_size
lowerCAmelCase : Dict = resample
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Any = rescale_factor
lowerCAmelCase : List[Any] = offset
lowerCAmelCase : Tuple = do_normalize
lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowerCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowerCAmelCase : Any = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] , ):
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : List[str] = image.astype(np.floataa )
if offset:
lowerCAmelCase : Union[str, Any] = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowerCAmelCase : Optional[int] = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowerCAmelCase : List[str] = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowerCAmelCase : str = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowerCAmelCase : Optional[int] = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowerCAmelCase : str = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : List[str] , ):
lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Any = resample if resample is not None else self.resample
lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : str = offset if offset is not None else self.offset
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase : List[str] = size if size is not None else self.size
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase : List[str] = make_batched(UpperCamelCase_ )
lowerCAmelCase : Dict = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase : Optional[Any] = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 60 | """simple docstring"""
def _lowerCamelCase( a = 1_0_0_0 ):
__a = 3
__a = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
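# Worked check: for an upper bound of 10, the multiples of 3 or 5 below it are
# 3, 5, 6 and 9, so the function above returns 3 + 5 + 6 + 9 = 23.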
| 261 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (SchedulerCommonTest ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = (DDIMParallelScheduler,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : int = scheduler_class(**lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = 10, 0.0
UpperCAmelCase_ : Optional[int] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for t in scheduler.timesteps:
UpperCAmelCase_ : Dict = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase_ , eta=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = 10, 0.0
scheduler.set_timesteps(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
UpperCAmelCase_ : Any = self.dummy_sample_deter + 0.1
UpperCAmelCase_ : int = self.dummy_sample_deter - 0.1
UpperCAmelCase_ : List[Any] = samplea.shape[0]
UpperCAmelCase_ : int = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase_ : int = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
UpperCAmelCase_ : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase_ : Optional[Any] = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""simple docstring"""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort a list with strand sort: repeatedly peel off an already-ordered strand
    from the input and merge it into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
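
    # Worked example (illustrative, not part of the original tests): the first pass over
    # [4, 3, 5, 1, 2] peels off the ascending strand [4, 5]; the recursion then peels [3]
    # and [1, 2], and each strand is merged into the growing solution list.
    print(strand_sort([10.5, -3.0, 7.25]))  # [-3.0, 7.25, 10.5] -- any comparable items work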
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
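

# Quick sanity check of the rounding above (a sketch added for illustration):
#     downscale_height_and_width(768, 768) == (96, 96)  # 768 // 8**2 = 12, no remainder
#     downscale_height_and_width(760, 760) == (96, 96)  # 760 // 64 = 11 rem 56, rounds up to 12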
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for Kandinsky 2.2: generates images from CLIP image embeddings."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
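

# Worked example (computed by hand, for illustration): the primes below 10 are 2, 3, 5
# and 7, so solution(10) == 17. With the default limit of 2,000,000 this solves Project
# Euler problem 10, "Summation of primes".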
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
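
# For reference, the replacement import recommended by the deprecation message above:
#
#     from diffusers import StableDiffusionControlNetPipeline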
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
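
# Note on the helper above: the Linear's weight tensor is replaced wholesale by the
# embedding matrix (shape [vocab_size, emb_size]), so the resulting layer maps hidden
# states of size emb_size to vocab_size logits -- the usual weight-tied LM head.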
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=True,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
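
# Sketch of what the lazy registration above buys (module path and usage illustrative):
# accessing a name triggers only the submodule that defines it, so e.g.
#
#     from transformers.models.cpmant import CpmAntConfig  # loads configuration_cpmant only
#
# leaves the torch-backed modeling_cpmant unimported until CpmAntModel & co. are touched.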
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
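

# Usage sketch (model id and image URL are illustrative, not part of this file):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]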
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def set_seed(args):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
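

# Minimal sketch of the environment init_gpu_params() reads in its multi-GPU branch
# (values illustrative; a distributed launcher normally exports these):
#
#     WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 NODE_RANK=0 RANK=0 python train.py
#
# with params.n_gpu > 1 and params.local_rank set per process.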
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
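

# Worked example (illustrative key): renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
# returns [{"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}]
# -- with n_shave_prefix_segments=0 only the substring renames apply.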
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes a state dict and a config, and returns a converted checkpoint.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
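# Example invocation (hypothetical file names, shown for illustration only):
#   python convert_ldm_checkpoint.py --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json --dump_path ./converted_ldm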
| 68 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ):
return str(self )
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
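    # Note (added for clarity): this is the Sherman-Morrison identity
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # where `self` is assumed to already hold A^(-1).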
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
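# Worked example (added for illustration): for a node at (0, 0) with goal (6, 6),
# HEURISTIC == 1 gives |0 - 6| + |0 - 6| = 12, while the Euclidean variant gives
# sqrt(6**2 + 6**2) ~= 8.49, so the two settings can rank open nodes differently.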
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False
    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 69 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def _lowerCamelCase( a , a , a=None , ):
__a = input_ids.ne(a ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
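# Worked example (added for illustration): f1_score("the cat sat down", "a cat sat")
# normalizes both sides to drop articles, leaving 3 vs. 2 tokens with 2 in common,
# so precision = 2/3, recall = 1, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.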
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 261 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int]=False ) -> List[Any]:
_lowerCAmelCase = copy.deepcopy(__snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowerCAmelCase = torch.ones((self.model_tester.num_masks,) )
_lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowerCAmelCase = bool_masked_pos.to(__snake_case )
if return_labels:
if model_class in [
*get_values(__snake_case ),
]:
_lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def lowercase__ ( self : List[Any] ) -> str:
pass
def lowercase__ ( self : Dict ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowercase__ ( self : str ) -> Any:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
@slow
def lowercase__ ( self : int ) -> Any:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = VideoMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowercase__ ( self : List[str] ) -> Tuple:
if not self.has_attentions:
pass
else:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
for model_class in self.all_model_classes:
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowerCAmelCase = len(__snake_case )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
_lowerCAmelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase__ ( self : int ) -> List[str]:
def check_hidden_states_output(__snake_case : List[str] , __snake_case : str , __snake_case : List[Any] ):
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) , __snake_case )
_lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
_lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : List[Any] ) -> str:
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Dict ) -> str:
_lowerCAmelCase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
__snake_case )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_video()
_lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__snake_case )
# verify the logits
_lowerCAmelCase = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __snake_case )
_lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Optional[int]:
_lowerCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(__snake_case )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_video()
_lowerCAmelCase = image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
# add boolean mask, indicating which patches to mask
_lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
_lowerCAmelCase = torch.load(__snake_case )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**__snake_case )
# verify the logits
_lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
_lowerCAmelCase = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=__snake_case )
self.assertEqual(outputs.logits.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowerCAmelCase = torch.tensor([0.51_42] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowerCAmelCase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=__snake_case ).to(
__snake_case )
with torch.no_grad():
_lowerCAmelCase = model(**__snake_case )
_lowerCAmelCase = torch.tensor(torch.tensor([0.64_69] ) , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1E-4 ) )
| 70 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=True,
        )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups, act_fn=act_fn,
        )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(lowerCamelCase )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
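    # Worked example (added for illustration): with blend_extent = 4, row y = 1 of
    # tile `b` becomes 0.75 * a_edge + 0.25 * b, a linear crossfade that hides the
    # seams between independently decoded tiles.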
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
| 261 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 71 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
__a = processed_features[self.model_input_names[0]]
__a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__a = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__a = "tf"
elif is_torch_tensor(lowerCamelCase ):
__a = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__a = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(lowerCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__a = to_numpy(lowerCamelCase )
else:
__a = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__a = processed_features[self.model_input_names[0]]
__a = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__a = []
for i in range(lowerCamelCase ):
__a = {k: v[i] for k, v in processed_features.items()}
# truncation
__a = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__a = PaddingStrategy.MAX_LENGTH
__a = {}
for i in range(lowerCamelCase ):
# padding
__a = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__a = []
if value.dtype is np.dtype(np.floataa ):
__a = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
__a = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__a = len(lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__a = np.ones(len(lowerCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__a = max_length - len(lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (0, difference) )
__a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__a = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = len(lowerCamelCase ) > max_length
if needs_to_be_truncated:
__a = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__a = processed_features["attention_mask"][:max_length]
return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
# Get padding strategy
if padding is not False:
if padding is True:
__a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase , lowerCamelCase ):
__a = PaddingStrategy(lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = padding
else:
__a = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
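# Minimal usage sketch (added; hypothetical subclass and values, for illustration):
#   class MyFeatureExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#   fe = MyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}, padding=True, return_tensors="np")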
| 261 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase__ = '''pt'''
elif is_tf_available():
lowerCAmelCase__ = '''tf'''
else:
lowerCAmelCase__ = '''jax'''
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
"""simple docstring"""
super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
    def get_tokenizer( self , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Tuple=2_0 , __lowerCAmelCase : List[str]=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
for i in range(len(__lowerCAmelCase ) ):
try:
_lowerCamelCase : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCamelCase : Any = list(filter(lambda __lowerCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
_lowerCamelCase : Optional[Any] = toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
_lowerCamelCase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCamelCase : List[Any] = [t[0] for t in toks]
# Ensure consistency
_lowerCamelCase : Any = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
_lowerCamelCase : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
_lowerCamelCase : Any = ''' ''' + output_txt
_lowerCamelCase : int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
    def test_multibytes_char( self ):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text )
        encoded_ids = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]Unicode €.[SEP]''' )
        encoded = tokenizer('''e è é ê ë''' )
        encoded_ids = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.perceiver_tokenizer
_lowerCamelCase : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
_lowerCamelCase : Dict = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
_lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
if FRAMEWORK != "jax":
_lowerCamelCase : str = list(batch.input_ids.numpy()[0] )
else:
_lowerCamelCase : Any = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.perceiver_tokenizer
_lowerCamelCase : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowerCAmelCase )
self.assertIn('''attention_mask''' , __lowerCAmelCase )
self.assertNotIn('''decoder_input_ids''' , __lowerCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.perceiver_tokenizer
_lowerCamelCase : str = [
'''Summary of the text.''',
'''Another summary.''',
]
_lowerCamelCase : int = tokenizer(
text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : Optional[int] = tempfile.mkdtemp()
_lowerCamelCase : Optional[int] = ''' He is very happy, UNwant\u00E9d,running'''
_lowerCamelCase : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Any = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
_lowerCamelCase : str = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : List[Any] = tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
_lowerCamelCase : Tuple = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
_lowerCamelCase : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
_lowerCamelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
_lowerCamelCase : Union[str, Any] = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
_lowerCamelCase : Optional[int] = json.load(__lowerCAmelCase )
_lowerCamelCase : str = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
_lowerCamelCase : Optional[int] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
_lowerCamelCase : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowerCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCamelCase : Dict = tokenizer_class.from_pretrained(
__lowerCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCamelCase : Union[str, Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowerCAmelCase )]
_lowerCamelCase : int = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : Any = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
_lowerCamelCase : Dict = tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
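# A toy re-implementation of the byte-level scheme the expected ids in the
# Unicode test above rely on (inferred from the ids, not the tokenizer's actual
# code): ids are UTF-8 bytes shifted by 6 special tokens, wrapped in [CLS]=4
# and [SEP]=5.
def toy_byte_encode(text, offset=6, cls_id=4, sep_id=5):
    return [cls_id] + [b + offset for b in text.encode("utf-8")] + [sep_id]

assert toy_byte_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]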
| 72 | """simple docstring"""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "" , ) -> bool:
    # A palindrome rearrangement exists iff at most one character has an odd count.
    return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(" " , "" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "" ) -> None:
    print("\nFor string = " , input_str , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(input_str ) , "\ttime =" , timeit(
        "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
    print(
        "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(input_str ) , "\ttime =" , timeit(
        "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and recover A*B via the inverse transform
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = 'A = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
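    # A quick hypothetical check beyond the doctests: (1 + 2x) * (3 + 4x)
    # equals 3 + 10x + 8x^2, so the printed product coefficients should be
    # 3, 10 and 8 (up to the 8-decimal rounding applied in __multiply).
    print(FFT([1, 2], [3, 4]))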
| 73 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
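# e.g. floats_list((2, 3)) -> a 2x3 nested list of pseudo-random floats in [0, scale)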
class ASTFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
    def test_call( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(lowerCamelCase )
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
@require_torch
    def test_double_precision_pad( self ):
import torch
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __a = np.random.rand(100 ).astype(np.float64 )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
from datasets import load_dataset
__a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
    def test_integration( self ):
# fmt: off
__a = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
__a = self._load_datasamples(1 )
__a = ASTFeatureExtractor()
__a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 261 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class Swinv2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''swinv2'''
    attribute_map = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self ,image_size=224 ,patch_size=4 ,num_channels=3 ,embed_dim=96 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,encoder_stride=32 ,**kwargs ,):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
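        # e.g. the defaults give hidden_size = 96 * 2**(4 - 1) = 768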
A = (0, 0, 0, 0) | 74 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
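# A quick standalone check of the (un)scaling above (hypothetical usage):
# ((x - mean) / std) * std + mean recovers x, so scale followed by unscale
# is the identity.
import torch

mean, std = torch.zeros(1, 768), torch.ones(1, 768)
x = torch.randn(2, 768)
assert torch.allclose(((x - mean) / std) * std + mean, x)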
| 261 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a_ : Optional[Any] = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a_ : List[Any] = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
a_ : Optional[Any] = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence''' ), id='''references''' ),
} ), codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''], reference_urls=[
'''https://github.com/m-popovic/chrF''',
], )
    def _compute( self, predictions, references, char_order = CHRF.CHAR_ORDER, word_order = CHRF.WORD_ORDER, beta = CHRF.BETA, lowercase = False, whitespace = False, eps_smoothing = False, ):
        """simple docstring"""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing )
        output = sb_chrf.corpus_score(predictions, transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
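# A back-of-the-envelope sketch of the F-beta combination chrF reports (this is
# not sacrebleu's implementation; P and R stand for the averaged character
# n-gram precision and recall, and beta weights recall relative to precision):
def toy_chrf_beta(precision, recall, beta=2):
    if precision == 0.0 and recall == 0.0:
        return 0.0
    return 100 * (1 + beta**2) * precision * recall / (beta**2 * precision + recall)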
| 75 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__:List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
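# A minimal illustration of the lazy-import pattern used above (simplified;
# the real _LazyModule also wires up module specs and error handling):
import importlib

class ToyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(f"{self._name}.{submodule}"), attr)
        raise AttributeError(attr)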
| 261 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a )["last_hidden_state"]
SCREAMING_SNAKE_CASE : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , a )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 76 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch( variables , * , num_layers , is_encoder_only ):
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict( converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
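# e.g. an old v1.0 checkpoint without "lm_head.weight" ends up with its LM head
# tied to "shared.weight", mirroring T5's tied input/output embeddings.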
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ):
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
    config = T5Config.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config )
    else:
        model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 261 | 0 |
"""simple docstring"""
def solution(power: int = 1000 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
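# e.g. solution(15): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26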
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
print("Sum of the digits is: ", result)
| 77 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        __a = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
        __a = StableUnCLIPImg2ImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        __a = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        __a = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __a = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
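        # 7 * 10**9 bytes is roughly 6.5 GiB of peak GPU memory.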
| 261 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
snake_case_ = """base_with_context"""
def load_notes_encoder( weights , model ):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
UpperCAmelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase = weights[F"""layers_{lyr_num}"""]
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase = ly_weight['attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
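# Note: the .T transposes above map Flax kernels stored as (in_features, out_features)
# onto PyTorch's (out_features, in_features) Linear weight layout.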
def load_continuous_encoder( weights , model ):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase = weights[F"""layers_{lyr_num}"""]
UpperCAmelCase = ly_weight['attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder( weights , model ):
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCAmelCase = weights[F"""layers_{lyr_num}"""]
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase = ly_weight['self_attention']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = ly_weight['MultiHeadDotProductAttention_0']
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
UpperCAmelCase = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
UpperCAmelCase = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
UpperCAmelCase = inference.parse_training_gin_file(lowercase_ , lowercase_ )
UpperCAmelCase = inference.InferenceModel(args.checkpoint_path , lowercase_ )
UpperCAmelCase = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
UpperCAmelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
UpperCAmelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
UpperCAmelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
UpperCAmelCase = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowercase_ )
UpperCAmelCase = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowercase_ )
UpperCAmelCase = load_decoder(ta_checkpoint['target']['decoder'] , lowercase_ )
UpperCAmelCase = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
UpperCAmelCase = SpectrogramDiffusionPipeline(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
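# Quick self-check for the functions above (arbitrary values; the sort works
# in place, so the list itself is mutated):
#
#   demo = [9, 2, 7, 4, 4, 1]
#   quick_sort_random(demo, 0, len(demo))
#   assert demo == [1, 2, 4, 4, 7, 9]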
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False) -> list:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    pixel_values = transform(image ).unsqueeze(0 )
    processor_pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(pixel_values , processor_pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
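# Example invocation (a sketch; the script filename and dump folder are
# hypothetical, and the timm checkpoint is downloaded on first use):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base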
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)
_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]
_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]
_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
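# These tests are driven entirely by pytest's parametrization; assuming the
# repository layout that provides data_structures.hashing.hash_map, this file
# runs with a plain:
#
#   python -m pytest -v path/to/this_test_file.py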
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
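# Minimal usage sketch outside the test harness (token ids are made up; the
# update() return value is the (stepped, completed, reset) triple the tests
# above rely on):
#
#   dc = DisjunctiveConstraint([[5, 6], [5, 7, 8]])
#   stepped, completed, reset = dc.update(5)
#   stepped, completed, reset = dc.update(6)
#   assert completed  # the branch [5, 6] is fully matched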
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            p_k = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[p_k] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
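# Intended usage sketch: subclass with a prefix and a defaults dict, then
# round-trip hyperparameters through short names. The field names here are
# illustrative only.
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 0.001, "warmup": False}
#
#   name = RunNamer.shortname({"learning_rate": 0.0001, "warmup": True})
#   params = RunNamer.parse_repr(name)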
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
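# The replacement import named in the deprecation message below:
#
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput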
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
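# Usage sketch: with no backbone_config, the default four-stage ResNet backbone
# is used, and to_dict() serializes the nested backbone config recursively.
#
#   config = UperNetConfig()
#   d = config.to_dict()
#   assert d["model_type"] == "upernet"
#   assert d["backbone_config"]["model_type"] == "resnet"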
| 261 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
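# The inherited FlaxModelTesterMixin consumes the two hooks above roughly like
# this (a sketch, not the mixin's literal code):
#
#   init_dict, inputs_dict = tester.prepare_init_args_and_inputs_for_common()
#   model = tester.model_class(**init_dict)
#   output = model(**inputs_dict)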
"""simple docstring"""
def solution(n: int = 1_0_0_0) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # a multiple of 15 satisfies both conditions but the `or` counts it once
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
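    # Worked check for a small bound: below 10 the multiples of 3 or 5 are
    # 3, 5, 6 and 9, which sum to 23.
    assert solution(10) == 23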
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'part_id = {partition_id}').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        '''simple docstring'''
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]

        if self._spark.conf.get('spark.master', '').startswith('local'):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir')
    def _info(self) -> datasets.DatasetInfo:
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        '''simple docstring'''
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, 'batch_bytes: long')
            .agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        '''simple docstring'''
        import pyspark

        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'])
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'])
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files)
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'])
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long')
            .groupBy('task_id')
            .agg(
                pyspark.sql.functions.sum('num_examples').alias('total_num_examples'), pyspark.sql.functions.sum('num_bytes').alias('total_num_bytes'), pyspark.sql.functions.count('num_bytes').alias('num_shards'), pyspark.sql.functions.collect_list('num_examples').alias('shard_lengths'))
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs):
        '''simple docstring'''
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir, fname)
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(F'Renaming {total_shards} shards.')
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs, fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), fpath.replace('TTTTT-SSSSS', F'{global_shard_id:05d}').replace('NNNNN', F'{total_shards:05d}'))

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda x: _rename_shard(*x)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS', F'{shard_id:05d}').replace('TTTTT', F'{task_id:05d}'), fpath.replace(SUFFIX, ''))
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        '''simple docstring'''
        return SparkExamplesIterable(self.df)
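# End-to-end sketch of the entry point this builder backs (the DataFrame is
# illustrative; `spark` is an active SparkSession):
#
#   from datasets import Dataset
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = Dataset.from_spark(df)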
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
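# strand_sort consumes its input via arr.pop, so hand it a copy whenever the
# caller still needs the original ordering:
data = [3, 1, 2]
assert strand_sort(list(data)) == [1, 2, 3]
assert data == [3, 1, 2]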
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    '''Standard BFS that also records the augmenting path in `parent`.'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    '''Edmonds-Karp: repeatedly augment along shortest paths found by BFS.'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
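# The capacity matrix above is the classic CLRS example; the printed maximum
# flow is 23. Note that ford_fulkerson rewrites `graph` into its residual
# network, so take a deep copy first whenever the original capacities are
# still needed:
#
#   import copy
#   flow = ford_fulkerson(copy.deepcopy(graph), source, sink)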
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__a = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
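# Sketch of the encoder/decoder pairing these tests exercise (checkpoint name
# taken from the slow tests above; both halves load from the same encoder
# checkpoint, so this requires a download):
#
#   from transformers import BertGenerationDecoder, BertGenerationEncoder
#   encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   decoder = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")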
'''simple docstring'''
def twos_complement(number: int) -> str:
    """
    Take in a negative integer (or zero) and return its two's complement
    representation as a binary string of minimal width.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
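    # Worked example: for -5 the minimal width is 4 bits (3 magnitude bits plus
    # the sign bit), and 2**4 - 5 = 11, i.e. '0b1011'.
    print(twos_complement(-5))  # 0b1011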
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
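# The replacement import named in the deprecation message above:
#
#   from diffusers import StableDiffusionControlNetPipeline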
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite in feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) for x, y in zip(_SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
__lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__lowerCAmelCase : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Dict = feat_extract.model_input_names[0]
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__lowerCAmelCase : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : List[str] = feat_extract.model_input_names[0]
__lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
__lowerCAmelCase : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ):
def _inputs_have_equal_length(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = len(input[0] )
for input_slice in input[1:]:
if len(_SCREAMING_SNAKE_CASE ) != length:
return False
return True
def _inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
return False
for input_slice_a, input_slice_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not np.allclose(np.asarray(_SCREAMING_SNAKE_CASE ) , np.asarray(_SCREAMING_SNAKE_CASE ) , atol=1E-3 ):
return False
return True
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = feat_extract.model_input_names[0]
__lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase : int = self.feat_extract_tester.min_seq_length
__lowerCAmelCase : List[str] = self.feat_extract_tester.batch_size
__lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = input_a[input_name]
__lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' )
__lowerCAmelCase : Union[str, Any] = input_a[input_name]
__lowerCAmelCase : Optional[Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[-1] ) )
__lowerCAmelCase : List[Any] = input_a[input_name]
__lowerCAmelCase : Union[str, Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )
__lowerCAmelCase : Any = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' )[input_name]
__lowerCAmelCase : List[Any] = feat_extract.pad(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : Dict = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) )
self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) )
self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) )
self.assertTrue(_inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np"
        )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_7))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
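        # Note: padding_vector_sum equals feature_size * padding_value, so each check above
        # compares a padded tail against (number of padded frames) * that per-frame sum.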
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np"
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_3 = input_3[input_name]
        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True
        )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of
        )
        input_2 = input_2[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)
    def test_padding_from_array(self):
        self._check_padding(numpify=True)
    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)
    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]) | 86 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
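# Usage sketch (hypothetical sizes): reuse decoder embeddings as an output projection.
#     lm_head = make_linear_from_emb(nn.Embedding(250054, 1024))
#     # lm_head.weight now shares storage with the embedding matrix (no copy is made).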
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whether to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
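# Example invocation (hypothetical local paths, script filename assumed; the remaining
# flags fall back to the defaults declared above):
#     python convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py \
#         --checkpoint_path /path/to/checkpoint_best.pt \
#         --dict_path /path/to/dict.mbart50.txt \
#         --config_yaml_path /path/to/config.yaml \
#         --pytorch_dump_folder_path ./wav2vec2-mbart50-dump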
| 261 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
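# The slicing above splits the fused qkv projection: for a (3 * hidden_size, hidden_size)
# weight, rows [0:h) become the query, [h:2h) the key, and [-h:) the value projection.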
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
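# e.g. rename_key(state_dict, "module.pos_embed", "vit.embeddings.position_embeddings")
# moves the value under the new key while removing the old one.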
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
lowercase__ : Any = 384
lowercase__ : Optional[Any] = 1536
lowercase__ : Any = 6
elif "l16" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : Any = 4096
lowercase__ : Any = 24
lowercase__ : List[str] = 16
lowercase__ : Any = 0.1
elif "b4" in checkpoint_url:
lowercase__ : int = 4
elif "l7" in checkpoint_url:
lowercase__ : Optional[int] = 7
lowercase__ : Dict = 1024
lowercase__ : Union[str, Any] = 4096
lowercase__ : str = 24
lowercase__ : Dict = 16
lowercase__ : Any = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase__ : Optional[Any] = torch.tensor([[-1.0915, -1.4876, -1.1809]])
elif "b16" in checkpoint_url:
lowercase__ : Tuple = torch.tensor([[14.2889, -18.9045, 11.7281]])
elif "l16" in checkpoint_url:
lowercase__ : Tuple = torch.tensor([[41.5028, -22.8681, 45.6475]])
elif "b4" in checkpoint_url:
lowercase__ : Optional[int] = torch.tensor([[-4.3868, 5.2932, -0.4137]])
else:
lowercase__ : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]])
# verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
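# Example invocation (script filename assumed; checkpoint URL is the default above):
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small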
| 87 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
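# Lazy-import behavior sketch: importing this package only registers the names in
# _import_structure; the heavy submodules load on first attribute access, e.g.
#     from transformers.models.trocr import TrOCRProcessor  # imports processing_trocr lazily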
| 261 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
__magic_name__ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__magic_name__ = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=UpperCamelCase__ , )
| 88 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
__a = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
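# Usage sketch (hypothetical namespace; init_gpu_params additionally requires CUDA):
#     args = argparse.Namespace(seed=42, n_gpu=1, local_rank=-1)
#     set_seed(args)
#     init_gpu_params(args)  # fills in world_size, global_rank, is_master, ...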
| 261 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 89 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 261 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
"""simple docstring"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1_500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='pt')
with self.device_placement():
if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
                    model_inputs['image_embeddings'] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 90 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ):
return str(self )
    def validate_indicies(self, loc):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another):
        return self + (-another)
    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
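    # Sherman-Morrison identity, where `self` holds A^(-1):
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)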
    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1():
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2():
        import doctest
        doctest.testmod()
    test1()
| 261 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
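# e.g. with pad_token_id=0, trim_batch(tensor([[5, 8, 0], [6, 0, 0]]), 0) drops only the
# all-pad third column and returns tensor([[5, 8], [6, 0]]).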
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ):
return len(self.src_lens )
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def _lowerCamelCase( a , a ):
__a = normalize_answer(a ).split()
__a = normalize_answer(a ).split()
__a = Counter(a ) & Counter(a )
__a = sum(common.values() )
if num_same == 0:
return 0
__a = 1.0 * num_same / len(a )
__a = 1.0 * num_same / len(a )
__a = (2 * precision * recall) / (precision + recall)
return fa
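# Worked example (added for clarity, not part of the original script): comparing
# "The cat sat" against "cat sat" with the normalizer above yields the token
# bag {"cat", "sat"} on both sides, so num_same == 2, precision = recall = 1.0,
# and the returned F1 is 1.0.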
def _lowerCamelCase( a , a ):
return normalize_answer(a ) == normalize_answer(a )
def _lowerCamelCase( a , a ):
assert len(a ) == len(a )
__a = 0
for hypo, pred in zip(a , a ):
em += exact_match_score(hypo , pred )
if len(a ) > 0:
em /= len(a )
return {"em": em}
def _lowerCamelCase( a ):
return model_prefix.startswith("rag" )
def _lowerCamelCase( a , a , a ):
__a = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__a = "dropout_rate"
for p in extra_params:
if getattr(hparams , p , None ):
if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(p ) )
delattr(hparams , p )
continue
__a = p if hasattr(config , p ) else equivalent_param[p]
setattr(config , set_p , getattr(hparams , p ) )
delattr(hparams , p )
return hparams, config
| 261 | 0 |
def _a ( SCREAMING_SNAKE_CASE_ : list ):
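# Bead sort ("gravity sort"): on each pass, any rod value larger than its
# right-hand neighbour sheds the difference to that neighbour, as if beads
# were falling under gravity; n passes of n comparisons give O(n^2) time.
# The scheme is only defined for non-negative integers, hence the check below.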
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(SCREAMING_SNAKE_CASE_ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 92 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : "DiagonalGaussianDistribution"
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : Optional[Any] = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
__a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(False )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(x , return_dict=return_dict )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(x_slice ) for x_slice in x.split(1 )]
__a = torch.cat(encoded_slices )
else:
__a = self.encoder(x )
__a = self.quant_conv(h )
__a = DiagonalGaussianDistribution(moments )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(z , return_dict=return_dict )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
__a = torch.cat(decoded_slices )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[2] , b.shape[2] , lowerCamelCase )
for y in range(blend_extent ):
b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = min(a.shape[3] , b.shape[3] , lowerCamelCase )
for x in range(blend_extent ):
b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
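# The two helpers above cross-fade one tile into its neighbour: at offset k
# inside the overlap, the output is a[-blend_extent + k] * (1 - k / blend_extent)
# + b[k] * (k / blend_extent), so the seam fades linearly across the overlap.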
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , overlap_size ):
__a = []
for j in range(0 , x.shape[3] , overlap_size ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(tile )
__a = self.quant_conv(tile )
row.append(tile )
rows.append(row )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , tile , blend_extent )
if j > 0:
__a = self.blend_h(row[j - 1] , tile , blend_extent )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(result_row , dim=3 ) )
__a = torch.cat(result_rows , dim=2 )
__a = DiagonalGaussianDistribution(moments )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , overlap_size ):
__a = []
for j in range(0 , z.shape[3] , overlap_size ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(tile )
__a = self.decoder(tile )
row.append(decoded )
rows.append(row )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , tile , blend_extent )
if j > 0:
__a = self.blend_h(row[j - 1] , tile , blend_extent )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(result_row , dim=3 ) )
__a = torch.cat(result_rows , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(x ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(z ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec )
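# Illustrative usage (added; not in the original file). Assuming the class above
# mirrors diffusers' AutoencoderKL, a tiled round trip would look like:
# vae = AutoencoderKL() # hypothetical de-obfuscated class name
# vae.enable_tiling()
# posterior = vae.encode(images).latent_dist
# reconstruction = vae.decode(posterior.sample()).sample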
| 261 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase : str = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowercase : str = concatenate_datasets
_lowercase : Any = DownloadConfig
_lowercase : Tuple = DownloadManager
_lowercase : Dict = DownloadMode
_lowercase : Union[str, Any] = DownloadConfig
_lowercase : str = DownloadMode
_lowercase : Optional[int] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 93 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = kwargs.pop("padding_side" , "right" )
__a = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
__a = processed_features[self.model_input_names[0]]
__a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(required_input ) == 0:
if return_attention_mask:
__a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__a = required_input[0]
if isinstance(first_element , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(required_input ):
__a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(first_element ):
__a = "tf"
elif is_torch_tensor(first_element ):
__a = "pt"
elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
__a = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(lowerCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__a = to_numpy(lowerCamelCase )
else:
__a = [to_numpy(v ) for v in value]
# Convert padding_strategy in PaddingStrategy
__a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__a = processed_features[self.model_input_names[0]]
__a = len(required_input )
if not all(len(v ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__a = []
for i in range(batch_size ):
__a = {k: v[i] for k, v in processed_features.items()}
# truncation
__a = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__a = PaddingStrategy.MAX_LENGTH
__a = {}
for i in range(batch_size ):
# padding
__a = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__a = []
if value.dtype is np.dtype(np.float64 ):
__a = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
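# Typical call pattern (added for clarity; the test code later in this file
# uses the same shape): feature_extractor.pad([{"input_values": raw_audio}],
# padding=True, return_tensors="np") pads every example to the longest
# sequence in the batch and returns a BatchFeature of numpy arrays.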
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = PaddingStrategy.DO_NOT_PAD , lowerCamelCase = None , lowerCamelCase = None , ):
__a = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__a = len(required_input )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__a = np.ones(len(required_input ) , dtype=np.intaa )
if needs_to_be_padded:
__a = max_length - len(required_input )
if self.padding_side == "right":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (0, difference) )
__a = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__a = np.pad(
processed_features["attention_mask"] , (difference, 0) )
__a = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__a = np.pad(
lowerCamelCase , lowerCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
__a = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__a = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__a = len(required_input ) > max_length
if needs_to_be_truncated:
__a = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__a = processed_features["attention_mask"][:max_length]
return processed_features
def a__ ( self , lowerCamelCase=False , lowerCamelCase=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__a = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding , PaddingStrategy ):
__a = PaddingStrategy(padding )
elif isinstance(padding , PaddingStrategy ):
__a = padding
else:
__a = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
| 261 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0.2 , _lowerCamelCase=0.2 ):
a :Tuple = bp_numa
a :Tuple = bp_numa
a :int = bp_numa
a :Optional[Any] = conva_get[:2]
a :Dict = conva_get[2]
a :str = size_pa
a :str = rate_w
a :List[str] = rate_t
a :Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a :Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a :Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a :Tuple = -2 * np.random.rand(self.conva[1] ) + 1
a :Any = -2 * np.random.rand(self.num_bpa ) + 1
a :Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# save model dict with pickle
a :Any = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(save_path , '''wb''' ) as f:
pickle.dump(model_dic , f )
print(F'''Model saved: {save_path}''' )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase ):
# read saved model
with open(_lowerCamelCase , '''rb''' ) as f:
a :str = pickle.load(f ) # noqa: S301
a :List[str] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
a :Optional[int] = model_dic.get('''size_pooling1''' )
a :str = model_dic.get('''num_bp1''' )
a :List[Any] = model_dic.get('''num_bp2''' )
a :Dict = model_dic.get('''num_bp3''' )
a :str = model_dic.get('''rate_weight''' )
a :str = model_dic.get('''rate_thre''' )
# create model instance
a :Optional[Any] = CNN(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# modify model parameter
a :Optional[Any] = model_dic.get('''w_conv1''' )
a :List[str] = model_dic.get('''wkj''' )
a :List[Any] = model_dic.get('''vji''' )
a :Optional[int] = model_dic.get('''thre_conv1''' )
a :Any = model_dic.get('''thre_bp2''' )
a :Optional[int] = model_dic.get('''thre_bp3''' )
return conv_ins
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return 1 / (1 + np.exp(-1 * x ))
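# (This is the logistic sigmoid, sigma(x) = 1 / (1 + exp(-x)); its derivative
# sigma(x) * (1 - sigma(x)) is what the out * (1 - out) factors in the
# backpropagation updates further down compute.)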
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return round(_lowerCamelCase , 3 )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# convolution process
a :Dict = convs[0]
a :Optional[Any] = convs[1]
a :Union[str, Any] = np.shape(_lowerCamelCase )[0]
# get the data slice of original image data, data_focus
a :List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
a :Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowerCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
a :int = []
a :List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowerCamelCase ):
a :Tuple = []
for i_focus in range(len(data_focus ) ):
a :str = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowerCamelCase ) )
a :str = np.asmatrix(featuremap ).reshape(
size_feature_map , size_feature_map )
data_featuremap.append(_lowerCamelCase )
# expanding the data slice to One dimenssion
a :Any = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(each_focus ) )
a :Any = np.asarray(_lowerCamelCase )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="average_pool" ):
# pooling process
a :Any = len(featuremaps[0] )
a :List[str] = int(size_map / size_pooling )
a :List[str] = []
for i_map in range(len(featuremaps ) ):
a :Optional[int] = featuremaps[i_map]
a :str = []
for i_focus in range(0 , size_map , size_pooling ):
for j_focus in range(0 , size_map , size_pooling ):
a :Union[str, Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowerCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowerCamelCase ) )
a :Dict = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
featuremap_pooled.append(_lowerCamelCase )
return featuremap_pooled
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# expanding three dimension data to one dimension list
a :Optional[Any] = []
for i in range(len(data ) ):
a :Optional[int] = np.shape(data[i] )
a :Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
a :List[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(data_listed )
a :List[Any] = np.asarray(data_expanded )
return data_expanded
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# expanding matrix to one dimension list
a :Optional[Any] = np.asarray(_lowerCamelCase )
a :Any = np.shape(_lowerCamelCase )
a :Optional[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Tuple = []
a :List[str] = 0
for i_map in range(num_map ):
a :List[str] = np.ones((size_map, size_map) )
for i in range(0 , size_map , size_pooling ):
for j in range(0 , size_map , size_pooling ):
a :Optional[int] = pd_pool[
i_pool
]
a :int = i_pool + 1
a :Optional[Any] = np.multiply(
_lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowerCamelCase )
return pd_all
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=bool ):
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowerCamelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowerCamelCase )) )
a :Union[str, Any] = 0
a :List[Any] = []
a :Optional[Any] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
a :Optional[Any] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(datas_train ) ):
# print('------------Learning Image: %d--------------'%p)
a :List[Any] = np.asmatrix(datas_train[p] )
a :int = np.asarray(datas_teach[p] )
a , a :Union[str, Any] = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :Optional[int] = self.pooling(data_conveda , self.size_poolinga )
a :Optional[int] = np.shape(data_pooleda )
a :List[str] = self._expand(data_pooleda )
a :Tuple = data_bp_input
a :str = np.dot(_lowerCamelCase , self.vji.T ) - self.thre_bpa
a :Optional[Any] = self.sig(_lowerCamelCase )
a :str = np.dot(_lowerCamelCase , self.wkj.T ) - self.thre_bpa
a :Union[str, Any] = self.sig(_lowerCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a :Any = np.multiply(
(data_teach - bp_outa) , np.multiply(bp_outa , (1 - bp_outa) ) )
a :str = np.multiply(
np.dot(pd_k_all , self.wkj ) , np.multiply(bp_outa , (1 - bp_outa) ) )
a :int = np.dot(pd_j_all , self.vji )
a :Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
a :List[str] = pd_conva_pooled.T.getA().tolist()
a :Optional[int] = self._calculate_gradient_from_pool(
data_conveda , pd_conva_pooled , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a :Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
a :List[Any] = self.rate_weight * np.dot(_lowerCamelCase , _lowerCamelCase )
a :Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a :Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
a :str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a :List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a :Dict = self.thre_bpa - pd_k_all * self.rate_thre
a :Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a :Union[str, Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a :List[str] = rp + 1
a :List[str] = error_count / patterns
all_mse.append(mse )
def draw_error():
a :Optional[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(all_mse , '''+-''' )
plt.plot(yplot , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(True , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# model predict
a :Any = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowerCamelCase )) )
for p in range(len(datas_test ) ):
a :Dict = np.asmatrix(datas_test[p] )
a , a :Union[str, Any] = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :Optional[int] = self.pooling(data_conveda , self.size_poolinga )
a :int = self._expand(data_pooleda )
a :Optional[int] = data_bp_input
a :Dict = bp_outa * self.vji.T - self.thre_bpa
a :List[Any] = self.sig(_lowerCamelCase )
a :Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
a :Tuple = self.sig(_lowerCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
a :Optional[Any] = [list(map(self.do_round , each ) ) for each in produce_out]
return np.asarray(res )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
# return the data of image after convoluting process so we can check it out
a :Union[str, Any] = np.asmatrix(_lowerCamelCase )
a , a :str = self.convolute(
_lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a :str = self.pooling(data_conveda , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 94 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
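# Both implementations rely on the same invariant: a string can be rearranged
# into a palindrome iff at most one character occurs an odd number of times
# (that character, if any, takes the middle position), hence the "< 2" test
# above and the "> 1" early exit below.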
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCAmelCase : Optional[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCAmelCase : Optional[Any] = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
UpperCAmelCase : Dict = """|""".join(sys.argv[1:])
UpperCAmelCase : Optional[Any] = re.compile(rF"""^({joined_dirs}).*?\.py$""")
UpperCAmelCase : List[str] = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 95 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE__:Any = random.Random()
if is_torch_available():
import torch
def _lowerCamelCase( a , a=1.0 , a=None , a=None ):
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = feature_size
__a = padding_value
__a = sampling_rate
__a = return_attention_mask
__a = do_normalize
def a__ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ):
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : str = ASTFeatureExtractor
def a__ ( self ):
__a = ASTFeatureExtractionTester(self )
def a__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__a = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__a = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(lowerCamelCase )
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
__a = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
@require_torch
def a__ ( self ):
import torch
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = np.random.rand(100 ).astype(np.floataa )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def a__ ( self , lowerCamelCase ):
from datasets import load_dataset
__a = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__a = ds.sort("id" ).select(range(lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def a__ ( self ):
# fmt: off
__a = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
__a = self._load_datasamples(1 )
__a = ASTFeatureExtractor()
__a = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : Optional[Any] = image_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : int = embeddings_size
_lowerCamelCase : Optional[Any] = hidden_sizes
_lowerCamelCase : Tuple = depths
_lowerCamelCase : Optional[Any] = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : List[str] = scope
_lowerCamelCase : Union[str, Any] = len(lowercase )
def A_ ( self ):
_lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = TFRegNetModel(config=lowercase )
_lowerCamelCase : Any = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : int = TFRegNetForImageClassification(lowercase )
_lowerCamelCase : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = TFRegNetModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A_ ( self ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def A_ ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def A_ ( self ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def A_ ( self ):
pass
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(lowercase )
_lowerCamelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A_ ( self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = model_class(lowercase )
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
_lowerCamelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase : Tuple = layer_type
_lowerCamelCase : int = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
_lowerCamelCase : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase )
_lowerCamelCase : int = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(lowercase )
_lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase )
_lowerCamelCase : List[Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
_lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
_lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
_lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
_lowerCamelCase : Optional[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
_lowerCamelCase : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def A_ ( self ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _snake_case ( ):
_lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self ):
_lowerCamelCase : Optional[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCamelCase : Optional[int] = self.default_image_processor
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
_lowerCamelCase : Dict = model(**lowercase , training=lowercase )
# verify the logits
_lowerCamelCase : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
_lowerCamelCase : str = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 96 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case__ ( snake_case_, snake_case_ ):
@register_to_config
def __init__( self , lowerCamelCase = 768 , ):
super().__init__()
__a = nn.Parameter(torch.zeros(1 , lowerCamelCase ) )
__a = nn.Parameter(torch.ones(1 , lowerCamelCase ) )
def a__ ( self , lowerCamelCase = None , lowerCamelCase = None , ):
__a = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) )
__a = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) )
return self
def a__ ( self , lowerCamelCase ):
__a = (embeds - self.mean) * 1.0 / self.std
return embeds
def a__ ( self , lowerCamelCase ):
__a = (embeds * self.std) + self.mean
return embeds
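# scale/unscale are inverse z-score transforms: scale standardizes embeddings
# with the learned mean and std, and unscale maps standardized values back to
# the original embedding space.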
| 261 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = np.random.default_rng(UpperCamelCase_ )
UpperCamelCase__ :Tuple = length
UpperCamelCase__ :Optional[int] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCamelCase__ :str = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self , UpperCamelCase_ ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ :Tuple = True
def lowerCAmelCase__ ( self , UpperCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCamelCase__ :Tuple = False
return x * self.a[0] + self.b[0]
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :List[str] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() )
UpperCamelCase__ :List[Any] = torch.nn.Parameter(torch.tensor(UpperCamelCase_ ).float() )
UpperCamelCase__ :str = True
def lowerCAmelCase__ ( self , UpperCamelCase_=None ):
'''simple docstring'''
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCamelCase__ :Optional[int] = False
return x * self.a + self.b
def a ( __a , __a = 16 ) -> Union[str, Any]:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCamelCase__ :Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCamelCase__ :Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
UpperCamelCase__ :Dict = load_dataset('''csv''' , data_files=__a )
UpperCamelCase__ :int = datasets['''train'''].unique('''label''' )
UpperCamelCase__ :List[str] = {v: i for i, v in enumerate(__a )}
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Any = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a , padding='''max_length''' )
if "label" in examples:
UpperCamelCase__ :str = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :str = datasets.map(
__a , batched=__a , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ :int = DataLoader(tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=2 )
UpperCamelCase__ :Dict = DataLoader(tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=1 )
return train_dataloader, eval_dataloader
| 97 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__:List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
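# _LazyModule defers the heavy torch/flax imports declared in _import_structure
# until an attribute is first accessed, so importing the package stays cheap
# even when the optional backends are installed.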
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def a_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
UpperCAmelCase__ = 1_1
UpperCAmelCase__ = int('1' + '0' * digit_len )
for num in range(den , last_digit ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(num , den ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
UpperCAmelCase__ = 1_0
return solutions
def a_ ( lowerCamelCase = 2 ):
UpperCAmelCase__ = 1.0
for fraction in fraction_list(lowerCamelCase ):
UpperCAmelCase__ = Fraction(fraction )
result *= frac.denominator / frac.numerator
return int(result )
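# For digit_len = 2 the four non-trivial digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution()
# returns 100 (Project Euler problem 33).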
if __name__ == "__main__":
print(solution())
| 98 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 261 | 0 |
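The digit-cancelling-fraction cell at the top of this row has lost its variable bindings in the identifier rewrite (digit_len is referenced but never bound, and the range() call receives the same argument twice). A readable reconstruction of the Project Euler 33 logic it encodes, with descriptive names assumed:

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    # e.g. 49/98 stays equal after "cancelling" the shared digit: 4/8.
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int = 2) -> list[str]:
    solutions = []
    last = int("1" + "0" * digit_len)  # 100 for two-digit numerators
    for num in range(11, last):
        for den in range(10, 100):
            # den % 10 != 0 rules out trivial cases such as 30/50.
            if num != den and num % 10 == den // 10 and den % 10 != 0:
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
    return solutions


def solution(digit_len: int = 2) -> int:
    # Multiply the four curious fractions and return the reduced denominator (100).
    product = Fraction(1, 1)
    for frac in fraction_list(digit_len):
        product *= Fraction(frac)
    return product.denominator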
def A_ ( A__ ) -> int:
if not isinstance(A__ , A__ ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : str = StableUnCLIPImgaImgPipeline
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_snake_case : List[Any] = frozenset([] )
def a__ ( self ):
__a = 32
__a = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 261 | 0 |
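The divisor-sum cell at the top of this row is unrunnable after the rewrite: isinstance(A__, A__) compares the argument with itself, and input_num is never bound. A working reconstruction under the obvious reading, function name assumed:

def sum_of_divisors(input_num: int) -> int:
    # Sum of the proper divisors, e.g. sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6 == 16.
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


assert sum_of_divisors(12) == 16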
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__magic_name__ = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[Any] = '''ernie_m'''
__lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , lowerCAmelCase__ = 2_5_0_0_0_2 , lowerCAmelCase__ = 7_6_8 , lowerCAmelCase__ = 1_2 , lowerCAmelCase__ = 1_2 , lowerCAmelCase__ = 3_0_7_2 , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 5_1_4 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1E-05 , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=0.0 , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = classifier_dropout
__SCREAMING_SNAKE_CASE = is_decoder
__SCREAMING_SNAKE_CASE = act_dropout
| 100 | """simple docstring"""
import random
def _lowerCamelCase( a , a , a ):
__a = a[left_index]
__a = left_index + 1
for j in range(left_index + 1 , a ):
if a[j] < pivot:
__a , __a = a[i], a[j]
i += 1
__a , __a = a[i - 1], a[left_index]
return i - 1
def _lowerCamelCase( a , a , a ):
if left < right:
__a = random.randint(a , right - 1 )
__a , __a = a[left], a[pivot] # switches the pivot with the left most bound
__a = partition(a , a , a )
quick_sort_random(
a , a , a ) # recursive quicksort to the left of the pivot point
quick_sort_random(
a , pivot_index + 1 , a ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase( ):
__a = input("Enter numbers separated by a comma:\n" ).strip()
__a = [int(a ) for item in user_input.split("," )]
quick_sort_random(a , 0 , len(a ) )
print(a )
if __name__ == "__main__":
main()
| 261 | 0 |
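The randomized quicksort in this row's second cell has the same rewrite damage: the list, the bounds, and the pivot all share one name. A runnable reconstruction of the algorithm it encodes, with names assumed:

import random


def partition(arr: list, left: int, right: int) -> int:
    # Partition arr[left:right] around arr[left]; return the pivot's final index.
    pivot = arr[left]
    i = left + 1
    for j in range(left + 1, right):
        if arr[j] < pivot:
            arr[i], arr[j] = arr[j], arr[i]
            i += 1
    arr[i - 1], arr[left] = arr[left], arr[i - 1]
    return i - 1


def quick_sort_random(arr: list, left: int, right: int) -> None:
    if left < right:
        # A uniformly random pivot defeats the sorted-input worst case.
        pivot = random.randint(left, right - 1)
        arr[left], arr[pivot] = arr[pivot], arr[left]
        pivot_index = partition(arr, left, right)
        quick_sort_random(arr, left, pivot_index)
        quick_sort_random(arr, pivot_index + 1, right)


data = [4, 3, 5, 1, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 4, 5]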
import re
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if len(re.findall('''[ATCG]''' , lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(a , a , *a )
__a , __a = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(a ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(a )}
__a = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
| 261 | 0 |
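The complement-strand cell at the top of this row references an unbound dna after the rewrite; restoring the parameter name makes it runnable:

import re


def dna_complement(dna: str) -> str:
    # Complement each base (A<->T, C<->G); reject any character outside ATCG.
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(str.maketrans("ATCG", "TAGC"))


assert dna_complement("GCTA") == "CGAT"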
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def lowercase ( _snake_case : Callable ) ->Callable:
"""simple docstring"""
@wraps(_snake_case )
def _inner_fn(*_snake_case : int , **_snake_case : List[str] ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , _snake_case , )
return fn(*_snake_case , **_snake_case )
return _inner_fn
| 102 | """simple docstring"""
import copy
import re
class snake_case__ :
_snake_case : Dict = """hp"""
_snake_case : List[str] = {}
_snake_case : int = None
@classmethod
def a__ ( cls , lowerCamelCase , lowerCamelCase ):
__a = prefix
__a = defaults
cls.build_naming_info()
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) == 0:
return ""
__a = None
if any(char.isdigit() for char in word ):
raise Exception(F"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase ) + 1 ):
__a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase ):
__a = ""
while integer != 0:
__a = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
__a = 0
while True:
__a = word + "#" + int_to_alphabetic(lowerCamelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a = sword
break
__a = short_word
__a = word
return short_word
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = param_name.split("_" )
__a = [TrialShortNamer.shortname_for_word(lowerCamelCase , lowerCamelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
__a = ["", "_"]
for separator in separators:
__a = separator.join(lowerCamelCase )
if shortname not in info["reverse_short_param"]:
__a = shortname
__a = param_name
return shortname
return param_name
@staticmethod
def a__ ( lowerCamelCase , lowerCamelCase ):
__a = TrialShortNamer.shortname_for_key(lowerCamelCase , lowerCamelCase )
__a = short_name
__a = param_name
@classmethod
def a__ ( cls ):
if cls.NAMING_INFO is not None:
return
__a = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
__a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase , lowerCamelCase )
__a = info
@classmethod
def a__ ( cls , lowerCamelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = 1 if v else 0
__a = "" if isinstance(lowerCamelCase , (int, float) ) else "-"
__a = F"{key}{sep}{v}"
name.append(lowerCamelCase )
return "_".join(lowerCamelCase )
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a = []
else:
__a = repr.split("_" )
__a = {}
for value in values:
if "-" in value:
__a , __a = value.split("-" )
else:
__a = re.sub("[0-9.]" , "" , lowerCamelCase )
__a = float(re.sub("[^0-9.]" , "" , lowerCamelCase ) )
__a = cls.NAMING_INFO["reverse_short_param"][p_k]
__a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a = cls.DEFAULTS[k]
return parameters
| 261 | 0 |
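The run-name class in this row's second cell encodes only the hyperparameters that differ from their defaults, each under a short key, so a run name stays compact and can be decoded back into a parameter dict. A minimal standalone sketch of the encoding half; all names below are assumed, and where the real class searches for a collision-free abbreviation this sketch just takes first letters:

def short_run_name(prefix: str, params: dict, defaults: dict) -> str:
    # Encode only the hyperparameters that differ from their defaults.
    parts = [prefix]
    for key, value in sorted(params.items()):
        if defaults.get(key) == value:
            continue  # defaults never appear in the name
        short_key = "".join(word[0] for word in key.split("_"))  # "learning_rate" -> "lr"
        sep = "" if isinstance(value, (int, float)) else "-"  # numbers fuse, strings get a dash
        parts.append(f"{short_key}{sep}{value}")
    return "_".join(parts)


assert short_run_name(
    "run",
    {"learning_rate": 3e-4, "batch_size": 32},
    {"learning_rate": 1e-3, "batch_size": 32},
) == "run_lr0.0003"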
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = """upernet"""
def __init__( self , lowerCamelCase=None , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=[1, 2, 3, 6] , lowerCamelCase=True , lowerCamelCase=0.4 , lowerCamelCase=384 , lowerCamelCase=256 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=255 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__a = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__a = backbone_config.get("model_type" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(lowerCamelCase )
__a = backbone_config
__a = hidden_size
__a = initializer_range
__a = pool_scales
__a = use_auxiliary_head
__a = auxiliary_loss_weight
__a = auxiliary_in_channels
__a = auxiliary_channels
__a = auxiliary_num_convs
__a = auxiliary_concat_input
__a = loss_ignore_index
def a__ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
| 261 | 0 |
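The UperNet config in this row composes a backbone config into the segmentation head's config, falling back to a default ResNet backbone when none is given. Hypothetical usage against the public transformers API (class names from that API; the label count is made up):

from transformers import ConvNextConfig, UperNetConfig

backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config, num_labels=150)
assert config.backbone_config.model_type == "convnext"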
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = RobertaTokenizer
SCREAMING_SNAKE_CASE : str = RobertaTokenizerFast
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = {'cls_token': '<s>'}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''<unk>'''}
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**lowercase__ : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**lowercase__ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ):
__lowercase = '''lower newer'''
__lowercase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
__lowercase = '''lower newer'''
__lowercase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' ,add_special_tokens=lowercase__ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' ,add_special_tokens=lowercase__ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer_class.from_pretrained('''roberta-base''' )
__lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.encode(
'''sequence builders''' ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = tokenizer.encode(
'''sequence builders''' ,'''multi-sequence build''' ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ ,lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.get_tokenizer()
__lowercase = '''Encode this sequence.'''
__lowercase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ ,lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ ,lowercase__ )
# Testing spaces after special tokens
__lowercase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ )} ) # mask token has a left space
__lowercase = tokenizer.convert_tokens_to_ids(lowercase__ )
__lowercase = '''Encode <mask> sequence'''
__lowercase = '''Encode <mask>sequence'''
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = encoded.index(lowercase__ )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = encoded.index(lowercase__ )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ )
__lowercase = self.tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ )
__lowercase = '''A, <mask> AllenNLP sentence.'''
__lowercase = tokenizer_r.encode_plus(lowercase__ ,add_special_tokens=lowercase__ ,return_token_type_ids=lowercase__ )
__lowercase = tokenizer_p.encode_plus(lowercase__ ,add_special_tokens=lowercase__ ,return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,)
__lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowercase__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : int ):
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
__lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] ,lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] ,lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase = F"{text_of_1_token} {text_of_1_token}"
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,)
__lowercase = self.rust_tokenizer_class.from_pretrained(
lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ )
__lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,)
| 104 | """simple docstring"""
def _lowerCamelCase( a = 1_0_0_0 ):
__a = 3
__a = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 1_5 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
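The short while-loop cell just above references an unbound n after the rewrite, and its elif a % 15 branch is unreachable (any multiple of 15 is already caught by a % 3 == 0). An equivalent working form, parameter name assumed:

def solution(n: int = 1000) -> int:
    # Sum of the natural numbers below n that are multiples of 3 or 5.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


assert solution(10) == 23  # 3 + 5 + 6 + 9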
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
a : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
a : list[int] = [ord(letter) for letter in string.ascii_lowercase]
a : set[int] = {ord(char) for char in VALID_CHARS}
a : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] , _lowercase : tuple[int, ...] ) ->str | None:
'''simple docstring'''
a : str = ""
a : int
a : int
a : int
for keychar, cipherchar in zip(cycle(_lowercase ) , _lowercase ):
a : Optional[int] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowercase )
return decoded
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] ) ->list[str]:
'''simple docstring'''
a : list[str] = []
for key in product(_lowercase , repeat=3 ):
a : Tuple = try_key(_lowercase , _lowercase )
if encoded is not None:
possibles.append(_lowercase )
return possibles
def _SCREAMING_SNAKE_CASE ( _lowercase : list[str] , _lowercase : str ) ->list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "p059_cipher.txt" ) ->int:
'''simple docstring'''
a : list[int]
a : list[str]
a : str
a : str
a : str = Path(_lowercase ).parent.joinpath(_lowercase ).read_text(encoding="utf-8" )
a : Union[str, Any] = [int(_lowercase ) for number in data.strip().split("," )]
a : Optional[Any] = filter_valid_chars(_lowercase )
for common_word in COMMON_WORDS:
a : Any = filter_common_word(_lowercase , _lowercase )
if len(_lowercase ) == 1:
break
a : Optional[Any] = possibles[0]
return sum(ord(_lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 105 | """simple docstring"""
import operator
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 261 | 0 |
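The Project Euler 59 cell earlier in this row brute-forces a three-letter lowercase XOR key by rejecting any key whose decryption leaves the printable-character set, then ranks the survivors by common English words. A compact sketch of the reject-early decrypt step, using str.isprintable() in place of the row's explicit character set:

from __future__ import annotations

from itertools import cycle


def try_key(ciphertext: bytes, key: bytes) -> str | None:
    # XOR-decrypt with a repeating key; bail out on the first non-printable byte.
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if not chr(decodedchar).isprintable():
            return None
        decoded += chr(decodedchar)
    return decoded


secret = bytes(c ^ k for c, k in zip(b"hi there", cycle(b"abc")))
assert try_key(secret, b"abc") == "hi there"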
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__UpperCamelCase : str = HfArgumentParser(InitializationArguments)
__UpperCamelCase : List[str] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__UpperCamelCase : Optional[int] = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
__UpperCamelCase : Tuple = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 106 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
__a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs_for_decoder()
__a = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 261 | 0 |
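The decoder test in this row's second cell verifies incremental decoding: running the model once over [prefix + new tokens] must match running only the new tokens while feeding the cached key/values of the prefix. A generic sketch of that check, assuming a causal LM whose outputs expose logits and past_key_values (the row's own test compares hidden-state slices instead):

import torch


def check_kv_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    # Reference: a single forward pass over the full concatenated sequence.
    full = torch.cat([input_ids, next_tokens], dim=-1)
    reference = model(full).logits
    # Cached: forward the prefix once, then only the new tokens plus the cache.
    past = model(input_ids, use_cache=True).past_key_values
    cached = model(next_tokens, past_key_values=past, use_cache=True).logits
    # Logits at the new positions must agree up to numerical noise.
    return torch.allclose(reference[:, -next_tokens.shape[1] :], cached, atol=atol)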
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A : List[Any], A : Tuple, A : int, A : List[str] ):
'''simple docstring'''
a = FunnelConfig.from_json_file(A )
print(F"""Building PyTorch model from configuration: {config}""" )
a = FunnelBaseModel(A ) if base_model else FunnelModel(A )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(A, A, A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), A )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 107 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 0 |
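The shim in this row's second cell keeps an old import path alive while warning users toward the new one. A minimal stand-in for the deprecate helper it calls; the real diffusers helper does more, so treat this as assumed behavior:

import warnings


def deprecate(name: str, remove_version: str, message: str,
              standard_warn: bool = True, stacklevel: int = 2) -> None:
    # Emit a FutureWarning naming the deprecated symbol, the removal version,
    # and the replacement import path.
    prefix = f"`{name}` is deprecated and will be removed in version {remove_version}. "
    warnings.warn((prefix if standard_warn else "") + message, FutureWarning, stacklevel=stacklevel)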
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : str =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a : ClassVar[Features] =Features({"text": Value("string" )} )
a : ClassVar[Features] =Features({"labels": ClassLabel} )
a : str ="text"
a : str ="labels"
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowerCAmelCase : Optional[int] = copy.deepcopy(self )
lowerCAmelCase : str = self.label_schema.copy()
lowerCAmelCase : Dict = features[self.label_column]
lowerCAmelCase : Union[str, Any] = label_schema
return task_template
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 108 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__:Optional[int] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(a , a , a , a )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a , a , a , a ):
__a = full_name.split("adaptor." )[-1]
__a = name.split("." )
if items[1].isdigit():
__a = int(items[1] )
else:
__a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer norm weight was initialized from {full_name}." )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a , a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a ):
__a , __a = emb.weight.shape
__a = nn.Linear(a , a , bias=a )
__a = emb.weight.data
return lin_layer
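# The helper above is the usual weight-tying trick: it wraps the embedding
# matrix in a bias-free nn.Linear so the input embedding table can double as
# the output projection (LM head).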
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ):
__a = WavaVecaConfig.from_pretrained(
a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , )
__a = MBartConfig.from_pretrained(a )
# load model
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__a = model[0].eval()
# load feature extractor
__a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a )
# set weights for wav2vec2 encoder
__a = WavaVecaModel(a )
recursively_load_weights_wavaveca(model.encoder , a )
# load decoder weights
__a = MBartForCausalLM(a )
__a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
__a = SpeechEncoderDecoderModel(encoder=a , decoder=a )
__a = False
__a = MBartaaTokenizer(a )
tokenizer.save_pretrained(a )
__a = hf_wavavec.config.to_dict()
__a = tokenizer.pad_token_id
__a = tokenizer.bos_token_id
__a = tokenizer.eos_token_id
__a = "mbart50"
__a = "wav2vec2"
__a = tokenizer.eos_token_id
__a = 2_5_0_0_0_4
__a = tokenizer.eos_token_id
__a = SpeechEncoderDecoderConfig.from_dict(a )
hf_wavavec.save_pretrained(a )
feature_extractor.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
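# NOTE: argparse's `type=bool` converts any non-empty string (including
# "False") to True, so --add_adapter below effectively cannot be disabled from
# the command line; `action="store_true"` with a False default is the usual idiom.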
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 261 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
UpperCAmelCase : Any = {
"""input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
UpperCAmelCase : List[Any] = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
UpperCAmelCase : Optional[int] = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 109 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:str = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Tuple = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__:List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE_: Any = random.Random()
if is_torch_available():
import torch
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : List[Any]=1.0 , snake_case_ : Tuple=None , snake_case_ : str=None ) -> str:
'''simple docstring'''
if rng is None:
UpperCAmelCase_ = global_rng
UpperCAmelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __A ( unittest.TestCase ):
def __init__(self : List[Any] , __a : Optional[int] , __a : str=7 , __a : int=400 , __a : List[Any]=2000 , __a : List[Any]=1 , __a : int=0.0 , __a : Optional[int]=16000 , __a : int=True , __a : Union[str, Any]=True , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = min_seq_length
UpperCAmelCase_ = max_seq_length
UpperCAmelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ = feature_size
UpperCAmelCase_ = padding_value
UpperCAmelCase_ = sampling_rate
UpperCAmelCase_ = return_attention_mask
UpperCAmelCase_ = do_normalize
def _lowercase (self : Optional[int] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase (self : Optional[Any] , __a : Any=False , __a : List[str]=False ):
def _flatten(__a : Any ):
return list(itertools.chain(*__a ) )
if equal_length:
UpperCAmelCase_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ = [np.asarray(__a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( snake_case_ , unittest.TestCase ):
a__ : str = ASTFeatureExtractor
def _lowercase (self : Tuple ):
UpperCAmelCase_ = ASTFeatureExtractionTester(self )
def _lowercase (self : Tuple ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ = [np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase_ = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
# Test batched
UpperCAmelCase_ = feat_extract(__a , padding=__a , return_tensors="np" ).input_values
UpperCAmelCase_ = feat_extract(__a , padding=__a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ = np.asarray(__a )
UpperCAmelCase_ = feat_extract(__a , return_tensors="np" ).input_values
UpperCAmelCase_ = feat_extract(__a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__a , __a ):
self.assertTrue(np.allclose(__a , __a , atol=1E-3 ) )
@require_torch
def _lowercase (self : Tuple ):
import torch
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = np.random.rand(100 ).astype(np.floataa )
UpperCAmelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_ = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase_ = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase (self : Union[str, Any] , __a : List[str] ):
from datasets import load_dataset
UpperCAmelCase_ = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase_ = ds.sort("id" ).select(range(__a ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def _lowercase (self : str ):
# fmt: off
UpperCAmelCase_ = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
UpperCAmelCase_ = self._load_datasamples(1 )
UpperCAmelCase_ = ASTFeatureExtractor()
UpperCAmelCase_ = feature_extractor(__a , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __a , atol=1E-4 ) )
| 1 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__)
def _lowerCamelCase( a ):
__a = git.Repo(search_parent_directories=a )
__a = {
"repo_id": str(a ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(a , "git_log.json" ) , "w" ) as f:
json.dump(a , a , indent=4 )
def _lowerCamelCase( a ):
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ["WORLD_SIZE"] )
__a = int(os.environ["N_GPU_NODE"] )
__a = int(os.environ["RANK"] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def _lowerCamelCase( a ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase_ : Any = TypeVar('T')
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return (position - 1) // 2
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return (2 * position) + 1
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return (2 * position) + 2
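# Worked example of the 0-indexed array-heap arithmetic above: for position 4
# the parent is (4 - 1) // 2 = 1, and the children of position 1 are
# 2 * 1 + 1 = 3 (left) and 2 * 1 + 2 = 4 (right).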
class lowercase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : Any = {}
_SCREAMING_SNAKE_CASE : Tuple = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def UpperCAmelCase_ ( self ):
# Check if the priority queue is empty
return self.elements == 0
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_SCREAMING_SNAKE_CASE : Dict = self.elements
self.elements += 1
self._bubble_up(__snake_case )
def UpperCAmelCase_ ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.heap[0]
self._bubble_down(__snake_case )
return elem
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
# Update the weight of the given key
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_map[elem]
_SCREAMING_SNAKE_CASE : List[Any] = (elem, weight)
if position > 0:
_SCREAMING_SNAKE_CASE : int = get_parent_position(__snake_case )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__snake_case )
else:
self._bubble_down(__snake_case )
else:
self._bubble_down(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
_SCREAMING_SNAKE_CASE : Any = self.position_map[elem]
if curr_pos == 0:
return None
_SCREAMING_SNAKE_CASE : List[Any] = get_parent_position(__snake_case )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.heap[curr_pos]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_up(__snake_case )
return None
def UpperCAmelCase_ ( self , __snake_case ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
_SCREAMING_SNAKE_CASE : int = self.position_map[elem]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.heap[curr_pos]
_SCREAMING_SNAKE_CASE : Dict = get_child_left_position(__snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = get_child_right_position(__snake_case )
if child_left_position < self.elements and child_right_position < self.elements:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.heap[child_left_position]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
if child_left_position < self.elements:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
else:
return None
if child_right_position < self.elements:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
return None
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
# Swap the nodes at the given positions
_SCREAMING_SNAKE_CASE : Any = self.heap[nodea_pos][0]
_SCREAMING_SNAKE_CASE : Any = self.heap[nodea_pos][0]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_SCREAMING_SNAKE_CASE : int = nodea_pos
_SCREAMING_SNAKE_CASE : Any = nodea_pos
class lowercase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
_SCREAMING_SNAKE_CASE : Tuple = {}
_SCREAMING_SNAKE_CASE : int = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def UpperCAmelCase_ ( self , __snake_case ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_SCREAMING_SNAKE_CASE : int = {}
self.nodes += 1
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
# Add an edge between 2 nodes in the graph
self.add_node(__snake_case )
self.add_node(__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = weight
_SCREAMING_SNAKE_CASE : Tuple = weight
def snake_case_ ( SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {node: maxsize for node in graph.connections}
_SCREAMING_SNAKE_CASE : Optional[Any] = {node: None for node in graph.connections}
_SCREAMING_SNAKE_CASE : int = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_SCREAMING_SNAKE_CASE : List[Any] = priority_queue.extract_min()
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_SCREAMING_SNAKE_CASE : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE__ , dist[neighbour] )
_SCREAMING_SNAKE_CASE : Optional[Any] = node
# running prim's algorithm
while not priority_queue.is_empty():
_SCREAMING_SNAKE_CASE : Tuple = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_SCREAMING_SNAKE_CASE : List[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(SCREAMING_SNAKE_CASE__ , dist[neighbour] )
_SCREAMING_SNAKE_CASE : Optional[int] = node
return dist, parent
| 200 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__:List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 261 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
_A : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : List[Any] = text_generator("""This is a test""" , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
lowerCAmelCase__ , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__SCREAMING_SNAKE_CASE : int = text_generator("""This is a test""" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = text_generator.model.config.eos_token_id
__SCREAMING_SNAKE_CASE : Any = """<pad>"""
__SCREAMING_SNAKE_CASE : Dict = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
],
[
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
{"""generated_token_ids""": ANY(lowerCAmelCase__ )},
],
] , )
@require_tf
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__SCREAMING_SNAKE_CASE : Any = text_generator("""This is a test""" , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__SCREAMING_SNAKE_CASE : List[str] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = TextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = """Hello I believe in"""
__SCREAMING_SNAKE_CASE : Dict = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = text_generator(lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
__SCREAMING_SNAKE_CASE : Any = text_generator(lowerCAmelCase__ , stop_sequence=""" fe""" )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": """Hello I believe in fe"""}] )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator.model
__SCREAMING_SNAKE_CASE : Dict = text_generator.tokenizer
__SCREAMING_SNAKE_CASE : str = text_generator("""This is a test""" )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__SCREAMING_SNAKE_CASE : Dict = text_generator("""This is a test""" , return_full_text=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__SCREAMING_SNAKE_CASE : str = pipeline(task="""text-generation""" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , return_full_text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = text_generator("""This is a test""" )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
__SCREAMING_SNAKE_CASE : str = text_generator("""This is a test""" , return_full_text=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__SCREAMING_SNAKE_CASE : Optional[int] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
[{"""generated_text""": ANY(lowerCAmelCase__ )}, {"""generated_text""": ANY(lowerCAmelCase__ )}],
] , )
with self.assertRaises(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = text_generator("""test""" , return_full_text=lowerCAmelCase__ , return_text=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = text_generator("""test""" , return_full_text=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = text_generator("""test""" , return_text=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__SCREAMING_SNAKE_CASE : str = text_generator("""""" )
self.assertEqual(lowerCAmelCase__ , [{"""generated_text""": ANY(lowerCAmelCase__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__SCREAMING_SNAKE_CASE : str = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
__SCREAMING_SNAKE_CASE : Tuple = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(lowerCAmelCase__ ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : Tuple = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__SCREAMING_SNAKE_CASE : List[str] = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__SCREAMING_SNAKE_CASE : Tuple = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase__ , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : str = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=lowerCAmelCase__ , top_p=0.5 )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """Hello world"""
__SCREAMING_SNAKE_CASE : Any = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger("""transformers.generation.tf_utils""" )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" )
__SCREAMING_SNAKE_CASE : Tuple = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCAmelCase__ ) as cl:
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator(lowerCAmelCase__ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(lowerCAmelCase__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowerCAmelCase__ ) as cl:
__SCREAMING_SNAKE_CASE : Union[str, Any] = text_generator(lowerCAmelCase__ , max_new_tokens=1 )
self.assertNotIn(lowerCAmelCase__ , cl.out )
with CaptureLogger(lowerCAmelCase__ ) as cl:
__SCREAMING_SNAKE_CASE : str = text_generator(lowerCAmelCase__ , max_length=1_0 )
self.assertNotIn(lowerCAmelCase__ , cl.out ) | 112 | """simple docstring"""
from __future__ import annotations
from typing import Any
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ):
__a , __a = row, column
__a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )]
def __str__( self ):
__a = F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(lowerCamelCase , len(str(lowerCamelCase ) ) )
__a = F"%{max_element_length}s"
# Make string and return
def single_line(lowerCamelCase ) -> str:
nonlocal string_format_identifier
__a = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def a__ ( self , lowerCamelCase ):
if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , lowerCamelCase ):
assert self.validate_indices(lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , lowerCamelCase , lowerCamelCase ):
assert self.validate_indices(lowerCamelCase )
__a = value
def __add__( self , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ):
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , lowerCamelCase ):
return self + (-another)
def __mul__( self , lowerCamelCase ):
if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a = F"Unsupported type given for another ({type(lowerCamelCase )})"
raise TypeError(lowerCamelCase )
def a__ ( self ):
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def a__ ( self , lowerCamelCase , lowerCamelCase ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == self.column == u.row == v.row # square matrix; u, v lengths must match
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
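# The update above is the Sherman-Morrison formula:
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# with `self` playing the role of A^(-1); `numerator_factor` is the
# denominator 1 + v^T A^(-1) u, and a zero value means A + u v^T is singular.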
# Testing
if __name__ == "__main__":
def _lowerCamelCase( ):
# a^(-1)
__a = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a = 1
print(F"a^(-1) is {ainv}" )
# u, v
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 1, 2, -3
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(a , a )}" )
def _lowerCamelCase( ):
import doctest
doctest.testmod()
testa()
| 261 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a :List[Any] = logging.get_logger(__name__)
class __a (snake_case_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = ["""pixel_values"""]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = True , _a = 1 / 255 , _a = None , _a = True , _a = None , _a = None , **_a , ) -> Tuple:
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE__ : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_a , default_to_square=_a , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE__ : Tuple = do_resize
SCREAMING_SNAKE_CASE__ : str = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : str = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : str = resample
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_a )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE__ : Tuple = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE__ : int = (size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a = None , **_a , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
def _a ( self , _a , _a , _a = None , **_a ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a , _a = None , **_a , ) -> Optional[Any]:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def _a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_a , param_name="""crop_size""" , default_to_square=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(_a )
if not is_batched(_a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [images]
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(_a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : Tuple = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Any = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
SCREAMING_SNAKE_CASE__ : Any = [to_channel_dimension_format(_a , _a ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
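# Note: the preprocessing above applies, in order: to-numpy conversion, resize,
# center crop, rescale, normalize, then channel-first formatting, and returns
# the images stacked under the "pixel_values" key.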
| 132 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _lowerCamelCase( a , a , a , a , a=True , a="pt" ):
__a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {}
__a = padding_side
return tokenizer(
[line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , )
def _lowerCamelCase( a , a , a=None , ):
__a = input_ids.ne(a ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
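# Worked example for the trimming helper above: with pad_token_id = 0 and
# input_ids = [[5, 6, 0], [7, 0, 0]], only the last column is padding in every
# row, so the trimmed batch is [[5, 6], [7, 0]].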
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ):
super().__init__()
__a = Path(lowerCamelCase ).joinpath(type_path + ".source" )
__a = Path(lowerCamelCase ).joinpath(type_path + ".target" )
__a = self.get_char_lens(self.src_file )
__a = max_source_length
__a = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
__a = tokenizer
__a = prefix
if n_obs is not None:
__a = self.src_lens[:n_obs]
__a = src_lang
__a = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , lowerCamelCase ):
__a = index + 1 # linecache starts at 1
__a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" )
__a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__a = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
)
__a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" )
__a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" )
__a = source_inputs["input_ids"].squeeze()
__a = target_inputs["input_ids"].squeeze()
__a = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( lowerCamelCase ):
return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()]
def a__ ( self , lowerCamelCase ):
__a = torch.stack([x["input_ids"] for x in batch] )
__a = torch.stack([x["attention_mask"] for x in batch] )
__a = torch.stack([x["decoder_input_ids"] for x in batch] )
__a = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase )
else self.tokenizer.pad_token_id
)
__a = trim_batch(lowerCamelCase , lowerCamelCase )
__a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase )
__a = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__)
def _lowerCamelCase( a ):
return list(itertools.chain.from_iterable(a ) )
def _lowerCamelCase( a ):
__a = get_git_info()
save_json(a , os.path.join(a , "git_log.json" ) )
def _lowerCamelCase( a , a , a=4 , **a ):
with open(a , "w" ) as f:
json.dump(a , a , indent=a , **a )
def _lowerCamelCase( a ):
with open(a ) as f:
return json.load(a )
def _lowerCamelCase( ):
__a = git.Repo(search_parent_directories=a )
__a = {
"repo_id": str(a ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def _lowerCamelCase( a , a ):
return list(map(a , a ) )
def _lowerCamelCase( a , a ):
with open(a , "wb" ) as f:
return pickle.dump(a , a )
def _lowerCamelCase( a ):
def remove_articles(a ):
return re.sub(R"\b(a|an|the)\b" , " " , a )
def white_space_fix(a ):
return " ".join(text.split() )
def remove_punc(a ):
__a = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) )
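# Example: normalize_answer("The Cat!") -> "cat" (lowercase, strip punctuation,
# drop the articles a/an/the, collapse whitespace).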
def _lowerCamelCase( a , a ):
__a = normalize_answer(a ).split()
__a = normalize_answer(a ).split()
__a = Counter(a ) & Counter(a )
__a = sum(common.values() )
if num_same == 0:
return 0
__a = 1.0 * num_same / len(a )
__a = 1.0 * num_same / len(a )
__a = (2 * precision * recall) / (precision + recall)
return fa
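# Worked example for the token-level F1 above: with tokenized inputs ["hello"]
# and ["hello", "world"], one token overlaps, so precision = 1/1 = 1.0,
# recall = 1/2 = 0.5, and F1 = 2 * 1.0 * 0.5 / 1.5 = 2/3.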
def _lowerCamelCase( a , a ):
return normalize_answer(a ) == normalize_answer(a )
def _lowerCamelCase( a , a ):
assert len(a ) == len(a )
__a = 0
for hypo, pred in zip(a , a ):
em += exact_match_score(a , a )
if len(a ) > 0:
em /= len(a )
return {"em": em}
def _lowerCamelCase( a ):
return model_prefix.startswith("rag" )
def _lowerCamelCase( a , a , a ):
__a = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__a = "dropout_rate"
for p in extra_params:
if getattr(a , a , a ):
if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(a ) )
delattr(a , a )
continue
__a = p if hasattr(a , a ) else equivalent_param[p]
setattr(a , a , getattr(a , a ) )
delattr(a , a )
return hparams, config
| 261 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A_ : Tuple = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase :
lowerCamelCase__ : int = PegasusConfig
lowerCamelCase__ : List[str] = {}
lowerCamelCase__ : Optional[int] = """gelu"""
def __init__( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=1_3 , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=9_9 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=3_7 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Tuple=2_0 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Dict=1 , __UpperCAmelCase : int=0 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = bos_token_id
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
SCREAMING_SNAKE_CASE__ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ = np.concatenate([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = 2_0
SCREAMING_SNAKE_CASE__ = model_class_name(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.encode(inputs_dict["""input_ids"""] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
SCREAMING_SNAKE_CASE__ = model.init_cache(decoder_input_ids.shape[0] , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE__ = model.decode(
decoder_input_ids[:, :-1] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , decoder_position_ids=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ = model.decode(
decoder_input_ids[:, -1:] , __UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = model.decode(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
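# Illustrative note (not in the original file): with pad_token_id=1, an input row
# [5, 7, 1, 1] yields attention_mask [1, 1, 0, 0]; the decoder mask always keeps
# position 0 set so generation can start even when the first decoder token equals
# the pad token.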
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
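    # Illustrative note (not in the original file): test_encode/test_decode run each
    # jitted function twice - once traced and once under jax.disable_jit() - to check
    # that XLA tracing does not change the output structure or shapes.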
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
    """

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        r"""Enable tiled VAE decoding to process large images in smaller tiles."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        r"""Disable tiled VAE decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        r"""Enable sliced VAE decoding to decode the batch one image at a time."""
        self.use_slicing = True

    def disable_slicing(self):
        r"""Disable sliced VAE decoding."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor to use, either one for all layers or a dict keyed by weight name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and sets the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
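    # Illustrative walk-through of the blends above (not in the original file): with
    # blend_extent = 4, overlap row y is
    #   out[y] = prev_tile[-4 + y] * (1 - y / 4) + curr_tile[y] * (y / 4)
    # a linear cross-fade from the previous tile (y = 0) toward the current one (y = 3).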
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""Decode a batch of latents using a tiled decoder."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
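# Minimal usage sketch (checkpoint name is illustrative; not part of the original file):
#
#   vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
#   vae.enable_tiling()  # large inputs are routed through tiled_encode / tiled_decode
#   posterior = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample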
| 261 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
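# Usage sketch (illustrative, not part of the original file):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   list(onnx_config.inputs)  # ["pixel_values"]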
| 196 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
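# Padding sketch (hypothetical subclass with model_input_names[0] == "input_values";
# not part of the original file):
#
#   feats = [{"input_values": [0.1, 0.2]}, {"input_values": [0.3]}]
#   batch = extractor.pad(feats, padding="longest", return_tensors="np")
#   batch["input_values"]    # shape (2, 2); second row right-padded with padding_value
#   batch["attention_mask"]  # [[1, 1], [1, 0]]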
| 261 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
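    # Illustrative note (not in the original file): "<|bagoftoken|>" expands into
    # repetitions of the adjacent token, which is why the bag-of-token input above
    # decodes with the run "、、、、".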
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)
    @slow
    def test_prefix_input_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
 | 74 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
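# Example (illustrative): "Taco cat" normalizes to "tacocat", where only "o" has an
# odd count, so both checks return True; "abc" has three odd counts and returns False.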
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
SCREAMING_SNAKE_CASE__:Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 0 |
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of an edge to the child node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the node prefix and a word"""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]
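    # Example of the three-way split above (illustrative):
    #   node.prefix = "banana", word = "band"  ->  ("ban", "ana", "d")
    #   i.e. (matching string, remaining prefix, remaining word).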
    def insert_many(self, words: list):
        """Insert many words in the tree"""
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        """Insert a word into the tree"""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Returns True if the word is in the tree"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
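    # Illustrative walk-through of the merge above (not in the original file): with
    # "banana" (leaf) having a single child "s" (leaf), delete("banana") finds one
    # outgoing edge, so the node absorbs its child - the prefix becomes "bananas" and
    # the child's leaf flag is kept.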
    def print_tree(self, height: int = 0):
        """Print the tree"""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 136 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
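# Illustrative note (not in the original file): the AST feature extractor produces
# log-mel filterbank features - every raw waveform maps to a fixed-size spectrogram
# of 1024 frames x 128 mel bins, which is the shape asserted above.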
| 261 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
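# Example (illustrative): for a 2x2 grayscale image with pixels [[10, 200], [30, 220]],
# the mean is 115, so thresholding produces [[0, 255], [0, 255]].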
if __name__ == "__main__":
lowerCAmelCase :Union[str, Any] = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''') | 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
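# Usage sketch (illustrative, not part of the original file):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   scaled = normalizer.scale(image_embeds)    # (x - mean) / std
#   restored = normalizer.unscale(scaled)      # x * std + mean, recovers the input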
| 261 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> List[str]:
"""simple docstring"""
__A = Csv()
__A = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(UpperCAmelCase , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(UpperCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
with open(UpperCAmelCase , encoding='utf-8' ) as f:
__A = f.read().splitlines()[1:]
__A = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
__A = csv._generate_tables([[csv_file_with_label]] )
__A = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
__A = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(UpperCAmelCase ) for label in labels]
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda UpperCAmelCase : [int(UpperCAmelCase ) for i in x.split()]} )
__A = csv._generate_tables([[csv_file_with_int_list]] )
__A = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
__A = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
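# Note (illustrative, not part of the original file): Csv._generate_tables yields
# (key, pyarrow.Table) pairs, one batch per file, which is why the tests above rebuild
# the full table with pa.concat_tables([table for _, table in generator]).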
| 161 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | """simple docstring"""
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict with torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
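# Editor's sketch (not part of the original script): the ".T" transposes above
# reflect layout conventions — Flax Dense kernels are stored (in_features, out_features)
# while torch.nn.Linear.weight is (out_features, in_features). Shapes are illustrative.
def _demo_kernel_transpose():
    import numpy as np

    kernel = np.random.randn(512, 1024).astype(np.float32)  # T5X layout: (in, out)
    linear = torch.nn.Linear(512, 1024, bias=False)
    with torch.no_grad():
        linear.weight.copy_(torch.from_numpy(kernel).T)  # PyTorch layout: (out, in)
    x = torch.randn(1, 512)
    assert torch.allclose(linear(x), x @ torch.from_numpy(kernel), atol=1e-5)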
| 261 | 0 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
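# Editor's sanity check for the function above:
assert multiplication_table(number=3, number_of_terms=2) == "3 * 1 = 3\n3 * 2 = 6"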
| 1 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def a__ ( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
__a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def a__ ( self , lowerCamelCase , lowerCamelCase=0 , lowerCamelCase=True ):
if str(lowerCamelCase ).startswith("mps" ):
__a = torch.manual_seed(lowerCamelCase )
else:
__a = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a__ ( self ):
__a = "cpu" # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**lowerCamelCase )
__a = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__a = self.get_dummy_inputs(lowerCamelCase )
inputs.update({"image_embeds": None} )
__a = sd_pipe(**lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def a__ ( self ):
__a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
def a__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
__a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device="cpu" ).manual_seed(0 )
__a = pipe(lowerCamelCase , "anime turle" , generator=lowerCamelCase , output_type="np" )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
__a = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
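# Editor's note (sketch, not from the test suite): the memory-saving calls used
# above combine two techniques — attention slicing computes attention in chunks,
# and sequential CPU offload keeps submodules on CPU until they are needed.
# Model id and dtype mirror the slow tests; treat this as illustrative.
def _memory_saving_setup_sketch():
    pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    return pipe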
| 261 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluates a fully parenthesized infix expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ : str = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 200 | """simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
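# Editor's demonstration: the sort is in-place over a[left:right].
_demo = [9, 4, 7, 1, 3]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 3, 4, 7, 9]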
| 261 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__SCREAMING_SNAKE_CASE : int = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str=0 ):
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE : str = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
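# Editor's sketch (not from the test suite): typical SAG usage — a sag_scale > 0
# enables self-attention guidance on top of classifier-free guidance. The model
# id matches the slow tests below; device and prompt are illustrative.
def _sag_usage_sketch():
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = pipe.to("cuda")
    return pipe("a photo of an astronaut", guidance_scale=7.5, sag_scale=0.75).images[0]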
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
__SCREAMING_SNAKE_CASE : int = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = """."""
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = sag_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images
__SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__SCREAMING_SNAKE_CASE : Tuple = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = """."""
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sag_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""" )
__SCREAMING_SNAKE_CASE : List[str] = output.images
__SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__SCREAMING_SNAKE_CASE : Dict = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = """."""
__SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""" , )
__SCREAMING_SNAKE_CASE : str = output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3) | 112 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
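# Editor's note: the strict-superset assert above relies on set comparison —
# ">" means every public name of HashMap also exists on dict, and dict has more.
assert {"keys", "items", "values"} > {"keys", "items"}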
| 261 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a :Optional[Any] = """\
"""
a :List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
a :str = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 132 | """simple docstring"""
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
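# Editor's usage sketch (API mirrors transformers.utils.hp_naming; names below
# are illustrative only):
class _DemoNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}


_name = _DemoNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
# defaults are omitted from the name; parse_repr fills them back in
assert _DemoNamer.parse_repr(_name)["batch_size"] == 8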
| 261 | 0 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=0.2 , __UpperCAmelCase : Union[str, Any]=0.2 ) -> Dict:
SCREAMING_SNAKE_CASE__ = bp_numa
SCREAMING_SNAKE_CASE__ = bp_numa
SCREAMING_SNAKE_CASE__ = bp_numa
SCREAMING_SNAKE_CASE__ = conva_get[:2]
SCREAMING_SNAKE_CASE__ = conva_get[2]
SCREAMING_SNAKE_CASE__ = size_pa
SCREAMING_SNAKE_CASE__ = rate_w
SCREAMING_SNAKE_CASE__ = rate_t
SCREAMING_SNAKE_CASE__ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Union[str, Any] ) -> List[str]:
# save model dict with pickle
SCREAMING_SNAKE_CASE__ = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(__UpperCAmelCase , """wb""" ) as f:
pickle.dump(__UpperCAmelCase , __UpperCAmelCase )
print(F"""Model saved: {save_path}""" )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , __UpperCAmelCase : Any ) -> Optional[Any]:
# read saved model
with open(__UpperCAmelCase , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = pickle.load(__UpperCAmelCase ) # noqa: S301
SCREAMING_SNAKE_CASE__ = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
SCREAMING_SNAKE_CASE__ = model_dic.get("""size_pooling1""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""num_bp1""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""num_bp2""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""num_bp3""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""rate_weight""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""rate_thre""" )
# create model instance
SCREAMING_SNAKE_CASE__ = CNN(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# modify model parameter
SCREAMING_SNAKE_CASE__ = model_dic.get("""w_conv1""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""wkj""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""vji""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""thre_conv1""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""thre_bp2""" )
SCREAMING_SNAKE_CASE__ = model_dic.get("""thre_bp3""" )
return conv_ins
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : List[str] ) -> Optional[Any]:
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Union[str, Any] ) -> Tuple:
return round(__UpperCAmelCase , 3 )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> Tuple:
# convolution process
SCREAMING_SNAKE_CASE__ = convs[0]
SCREAMING_SNAKE_CASE__ = convs[1]
SCREAMING_SNAKE_CASE__ = np.shape(__UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ = []
for i_focus in range(0 , size_data - size_conv + 1 , __UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = []
for i_focus in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE__ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = np.asmatrix(__UpperCAmelCase ).reshape(
__UpperCAmelCase , __UpperCAmelCase )
data_featuremap.append(__UpperCAmelCase )
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str="average_pool" ) -> List[str]:
# pooling process
SCREAMING_SNAKE_CASE__ = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ = []
for i_map in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE__ = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ = []
for i_focus in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
for j_focus in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = np.asmatrix(__UpperCAmelCase ).reshape(__UpperCAmelCase , __UpperCAmelCase )
featuremap_pooled.append(__UpperCAmelCase )
return featuremap_pooled
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
# expanding three dimension data to one dimension list
SCREAMING_SNAKE_CASE__ = []
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE__ = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[str] ) -> str:
# expanding matrix to one dimension list
SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = np.shape(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i_map in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = np.ones((size_map, size_map) )
for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
for j in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ = i_pool + 1
SCREAMING_SNAKE_CASE__ = np.multiply(
__UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__UpperCAmelCase )
return pd_all
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int]=bool ) -> str:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(__UpperCAmelCase )) )
print((""" - - Shape: Teach_Data """, np.shape(__UpperCAmelCase )) )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(__UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.convolute(
__UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ = self.pooling(__UpperCAmelCase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ = np.shape(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self._expand(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = data_bp_input
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCAmelCase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ = self.sig(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCAmelCase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ = self.sig(__UpperCAmelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ = np.multiply(
(data_teach - bp_outa) , np.multiply(__UpperCAmelCase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ = np.multiply(
np.dot(__UpperCAmelCase , self.wkj ) , np.multiply(__UpperCAmelCase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ = np.dot(__UpperCAmelCase , self.vji )
SCREAMING_SNAKE_CASE__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ = self._calculate_gradient_from_pool(
__UpperCAmelCase , __UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ = self.rate_weight * np.dot(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ = rp + 1
SCREAMING_SNAKE_CASE__ = error_count / patterns
all_mse.append(__UpperCAmelCase )
def draw_error():
SCREAMING_SNAKE_CASE__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCAmelCase , """+-""" )
plt.plot(__UpperCAmelCase , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(__UpperCAmelCase , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Dict ) -> Optional[int]:
# model predict
SCREAMING_SNAKE_CASE__ = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(__UpperCAmelCase )) )
for p in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE__ = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.convolute(
__UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ = self.pooling(__UpperCAmelCase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ = self._expand(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = data_bp_input
SCREAMING_SNAKE_CASE__ = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ = self.sig(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ = self.sig(__UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ = [list(map(self.do_round , __UpperCAmelCase ) ) for each in produce_out]
return np.asarray(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] ) -> Any:
# return the data of image after convoluting process so we can check it out
SCREAMING_SNAKE_CASE__ = np.asmatrix(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.convolute(
__UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ = self.pooling(__UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
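# Editor's illustration of the average pooling implemented above, in isolation:
_fm = np.arange(16, dtype=float).reshape(4, 4)
_pooled = _fm.reshape(2, 2, 2, 2).mean(axis=(1, 3))  # 2x2 average pooling
assert _pooled.shape == (2, 2)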
| 165 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
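# Editor's quick check (sketch): the default config falls back to a ResNet backbone.
def _upernet_config_demo():
    cfg = UperNetConfig()
    assert cfg.to_dict()["model_type"] == "upernet"
    return cfg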
| 261 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 196 | """simple docstring"""
def solution(n: int = 1_0_0_0) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    result = 0
    a = 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 1_5 == 0:
            # unreachable: any multiple of 15 already matched the branch above
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
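# Editor's sanity check against a brute-force sum for small n:
assert solution(10) == sum(x for x in range(3, 10) if x % 3 == 0 or x % 5 == 0)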
| 261 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
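# Editor's toy example for the metric above:
def _simple_accuracy_demo():
    assert simple_accuracy(np.array([0, 2, 1]), np.array([0, 1, 1])) == 2 / 3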
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
_lowerCamelCase: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
_lowerCamelCase: int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowerCamelCase: bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 74 | """simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly pull increasing 'strands' out of the input and
    merge them into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
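    # Illustrative note (added; not part of the original file): strand sort repeatedly
    # peels an increasing "strand" off the input and merges it into the sorted solution.
    # For [4, 3, 5, 1, 2] the strands pulled out are [4, 5], then [3], then [1, 2];
    # merging them yields [1, 2, 3, 4, 5].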
| 261 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # adapter layers can be differentiated from other weights by their layer id
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
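# Added note: `make_linear_from_emb` builds an nn.Linear whose weight is a copy of an
# embedding matrix, i.e. an (untied) LM-head projection. As far as this script shows,
# it is defined as a helper and never invoked; weight tying is instead disabled via
# `tie_word_embeddings = False` when the SpeechEncoderDecoderModel is assembled below.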
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 136 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 261 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4 | 331 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 0 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # adapter layers can be differentiated from other weights by their layer id
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """
    Circular FIFO queue with a fixed capacity, backed by a doubly linked list.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
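    # Added usage sketch (illustrative; matches the API defined above):
    #   queue = CircularQueueLinkedList(initial_capacity=2)
    #   queue.enqueue("a")
    #   queue.enqueue("b")
    #   assert queue.dequeue() == "a"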
| 93 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 261 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Performs max pooling on a square 2D matrix (image).
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Performs average pooling on a square 2D matrix (image).
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
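# Added check (illustrative): pooling a 4x4 matrix with size=2 and stride=2 yields a
# 2x2 output; the values below follow directly from the definitions above.
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#   -> array([[ 6.,  8.], [14., 16.]])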
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 1 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node setup.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the seed for reproducibility.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 261 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then runs `gc.collect()`
    and empties the accelerator cache. Returned objects should be reassigned to the
    same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
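# Added usage sketch (illustrative; `a` and `b` stand for any large CUDA objects):
#   a = torch.ones(1000, 1000).cuda()
#   b = torch.ones(1000, 1000).cuda()
#   a, b = release_memory(a, b)  # both names become None and the device cache is cleared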
def should_reduce_batch_size(exception: Exception) -> bool:
    """
    Checks whether `exception` is one of the known out-of-memory error types.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with progressively halved batch sizes whenever an
    out-of-memory style error (see `should_reduce_batch_size`) is raised.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
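# Added usage sketch (illustrative; the training function below is hypothetical).
# The decorator injects `batch_size` as the first argument and halves it after
# every OOM-style failure recognized by `should_reduce_batch_size`:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # run training at this batch size
#
#   train(model, dataloader)  # note: the caller does NOT pass batch_size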
| 200 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 261 | 0 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 112 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
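    # Added note: `sherman_morrison` applies the identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
    # with `self` playing the role of A^(-1). The `numerator_factor == 0` guard
    # rejects the degenerate case where A + u v^T is singular.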
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 261 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a :List[str] = logging.get_logger(__name__)
a :List[str] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __a (snake_case_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = """mobilenet_v2"""
def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.02 , _a=0.001 , _a=255 , **_a , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : List[str] = depth_multiplier
SCREAMING_SNAKE_CASE__ : Tuple = depth_divisible_by
SCREAMING_SNAKE_CASE__ : Tuple = min_depth
SCREAMING_SNAKE_CASE__ : Tuple = expand_ratio
SCREAMING_SNAKE_CASE__ : Tuple = output_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = first_layer_is_expansion
SCREAMING_SNAKE_CASE__ : Tuple = finegrained_output
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf_padding
SCREAMING_SNAKE_CASE__ : List[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = semantic_loss_ignore_index
class __a (snake_case_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = version.parse("""1.11""")
@property
def _a ( self ) -> str:
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _a ( self ) -> Any:
"""simple docstring"""
return 1E-4
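# A short usage sketch (assuming the classes above are the MobileNetV2Config and
# MobileNetV2OnnxConfig exported by transformers; names below come from that API):
# the config carries the width multiplier and output stride, and the ONNX config
# declares the dynamic batch axes and validation tolerance.
from transformers import MobileNetV2Config

demo_config = MobileNetV2Config(depth_multiplier=1.0, output_stride=32)
print(demo_config.hidden_act)  # "relu6", per the default in __init__ above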
| 132 | """simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
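# A quick sanity check of trim_batch above (illustrative values): columns made up
# entirely of the pad id are removed, shrinking the batch to its longest real row.
_demo_batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
assert trim_batch(_demo_batch, pad_token_id=0).tolist() == [[5, 6], [7, 0]]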
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path):
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f, x):
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
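# Worked example for the token-level F1 above (illustrative): "the cat sat" vs.
# "a cat sat down" normalizes to 2 and 3 tokens with an overlap of 2, giving
# precision 1.0, recall 2/3, and F1 = 2 * (1 * 2/3) / (1 + 2/3) = 0.8.
assert abs(f1_score("the cat sat", "a cat sat down") - 0.8) < 1e-9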
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 261 | 0 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator
    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
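# A quick comparison with the stdlib (illustrative): unlike str.split, the loop
# above only appends the final field when the last character is not the
# separator, so a trailing separator yields no empty string.
assert split("a,b,", separator=",") == ["a", "b"]
assert "a,b,".split(",") == ["a", "b", ""]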
| 165 | """simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class snake_case__ ( snake_case_ ):
_snake_case : "DiagonalGaussianDistribution"
class snake_case__ ( snake_case_, snake_case_ ):
_snake_case : Optional[Any] = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
def a__ ( self , lowerCamelCase , lowerCamelCase=False ):
if isinstance(lowerCamelCase , (Encoder, Decoder) ):
__a = value
def a__ ( self , lowerCamelCase = True ):
__a = use_tiling
def a__ ( self ):
self.enable_tiling(lowerCamelCase )
def a__ ( self ):
__a = True
def a__ ( self ):
__a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ):
__a = {}
def fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return processors
def a__ ( self , lowerCamelCase ):
__a = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , "set_processor" ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
module.set_processor(lowerCamelCase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase , lowerCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase , return_dict=lowerCamelCase )
if self.use_slicing and x.shape[0] > 1:
__a = [self.encoder(lowerCamelCase ) for x_slice in x.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase , return_dict=lowerCamelCase )
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
@apply_forward_hook
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__a = [self._decode(lowerCamelCase ).sample for z_slice in z.split(1 )]
__a = torch.cat(lowerCamelCase )
else:
__a = self._decode(lowerCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase )
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_latent_min_size * self.tile_overlap_factor )
__a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__a = []
for i in range(0 , x.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , x.shape[3] , lowerCamelCase ):
__a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__a = self.encoder(lowerCamelCase )
__a = self.quant_conv(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
__a = DiagonalGaussianDistribution(lowerCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True ):
__a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__a = int(self.tile_sample_min_size * self.tile_overlap_factor )
__a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__a = []
for i in range(0 , z.shape[2] , lowerCamelCase ):
__a = []
for j in range(0 , z.shape[3] , lowerCamelCase ):
__a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__a = self.post_quant_conv(lowerCamelCase )
__a = self.decoder(lowerCamelCase )
row.append(lowerCamelCase )
rows.append(lowerCamelCase )
__a = []
for i, row in enumerate(lowerCamelCase ):
__a = []
for j, tile in enumerate(lowerCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__a = self.blend_v(rows[i - 1][j] , lowerCamelCase , lowerCamelCase )
if j > 0:
__a = self.blend_h(row[j - 1] , lowerCamelCase , lowerCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase , dim=3 ) )
__a = torch.cat(lowerCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , ):
__a = sample
__a = self.encode(lowerCamelCase ).latent_dist
if sample_posterior:
__a = posterior.sample(generator=lowerCamelCase )
else:
__a = posterior.mode()
__a = self.decode(lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase )
| 261 | 0 |
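# Standalone sketch of the horizontal tile blending used by the tiled VAE above
# (illustrative, numpy instead of torch): the left tile fades out linearly over
# `blend_extent` columns while the right tile fades in, hiding the seam.
import numpy as np


def blend_h(a: np.ndarray, b: np.ndarray, blend_extent: int) -> np.ndarray:
    blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
    for x in range(blend_extent):
        b[..., x] = a[..., -blend_extent + x] * (1 - x / blend_extent) + b[..., x] * (x / blend_extent)
    return b


left = np.ones((1, 1, 4, 8))
right = np.zeros((1, 1, 4, 8))
blended = blend_h(left, right, blend_extent=4)
print(blended[0, 0, 0, :4].tolist())  # [1.0, 0.75, 0.5, 0.25], a smooth ramp across the seam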
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __a ( snake_case_ ):
__lowercase : Union[str, Any] = """EncodecFeatureExtractor"""
__lowercase : int = ("""T5Tokenizer""", """T5TokenizerFast""")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> int:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ , language=lowerCAmelCase__ , no_timestamps=lowerCAmelCase__ )
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Tuple = kwargs.pop('audio' , lowerCAmelCase__ )
lowercase__: str = kwargs.pop('sampling_rate' , lowerCAmelCase__ )
lowercase__: Optional[int] = kwargs.pop('text' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase__: List[str] = args[0]
lowercase__: int = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
lowercase__: List[Any] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if audio is not None:
lowercase__: str = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowercase__: str = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
lowercase__: Any = audio_inputs['padding_mask']
return inputs
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: Tuple = kwargs.pop('audio' , lowerCAmelCase__ )
lowercase__: Any = kwargs.pop('padding_mask' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase__: Optional[Any] = args[0]
lowercase__: str = args[1:]
if audio_values is not None:
return self._decode_audio(lowerCAmelCase__ , padding_mask=lowerCAmelCase__ )
else:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
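# Minimal illustration of the padding-mask stripping done in _decode_audio above
# (illustrative values, with 0 standing in for the feature extractor's
# padding_value): positions flagged as padding are dropped by boolean indexing,
# then the remainder is reshaped back to (channels, -1).
_demo_audio = np.arange(8, dtype=np.float32).reshape(1, 8)
_demo_mask = np.array([1, 1, 1, 1, 1, 0, 0, 0])
assert _demo_audio[_demo_mask[None, :] != 0].reshape(1, -1).tolist() == [[0.0, 1.0, 2.0, 3.0, 4.0]]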
| 196 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
__a = feature_size
__a = sampling_rate
__a = padding_value
__a = kwargs.pop("padding_side" , "right" )
__a = kwargs.pop("return_attention_mask" , lowerCamelCase )
super().__init__(**lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__a = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
__a = processed_features[self.model_input_names[0]]
__a = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase ) == 0:
if return_attention_mask:
__a = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__a = required_input[0]
if isinstance(lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__a = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase ):
__a = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase ):
__a = "tf"
elif is_torch_tensor(lowerCamelCase ):
__a = "pt"
elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
__a = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(lowerCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__a = to_numpy(lowerCamelCase )
else:
__a = [to_numpy(lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__a = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
__a = processed_features[self.model_input_names[0]]
__a = len(lowerCamelCase )
if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
__a = []
for i in range(lowerCamelCase ):
__a = {k: v[i] for k, v in processed_features.items()}
# truncation
__a = self._truncate(
lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
truncated_inputs.append(lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__a = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__a = PaddingStrategy.MAX_LENGTH
__a = {}
for i in range(lowerCamelCase ):
# padding
__a = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__a = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
batch_outputs[key].append(lowerCamelCase )
return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
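# Worked example of the pad-to-multiple rounding used in `_pad` and `_truncate`
# above (illustrative numbers): lengths that are not already a multiple are
# rounded up to the next one; exact multiples are left alone by the `%` guard.
_max_length, _multiple = 100, 8
if _max_length % _multiple != 0:
    _max_length = ((_max_length // _multiple) + 1) * _multiple
assert _max_length == 104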
| 261 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Any ,A_ : Dict=7 ,A_ : List[Any]=3 ,A_ : Tuple=18 ,A_ : Any=30 ,A_ : Optional[int]=400 ,A_ : Optional[int]=True ,A_ : Dict=None ,A_ : Tuple=True ,) -> List[str]:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = apply_ocr
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = LayoutLMvaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'apply_ocr' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,A_ )
self.assertIsInstance(encoding.boxes ,A_ )
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
# with apply_OCR = True
A = LayoutLMvaImageProcessor()
from datasets import load_dataset
A = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
A = Image.open(ds[0]['file'] ).convert('RGB' )
A = image_processing(A_ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,A_ )
self.assertListEqual(encoding.boxes ,A_ )
# with apply_OCR = False
A = LayoutLMvaImageProcessor(apply_ocr=A_ )
A = image_processing(A_ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) ) | 74 | """simple docstring"""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
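# Worked check of the parity rule (illustrative): "Taco cat" normalizes to
# "tacocat" with counts t:2, a:2, c:2, o:1, so only one character has an odd
# count and a palindrome arrangement exists; "abc" has three odd counts and fails.
assert can_string_be_rearranged_as_palindrome_counter("Taco cat") is True
assert can_string_be_rearranged_as_palindrome_counter("abc") is False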
| 261 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase : str = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : str = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
UpperCAmelCase : Optional[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
UpperCAmelCase : str = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
UpperCAmelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
UpperCAmelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
UpperCAmelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase : str = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase : Any = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(snake_case_ )
class SCREAMING_SNAKE_CASE__ :
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[int] = False , lowerCAmelCase_ : Union[str, Any] = False , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : List[Any] = None , **lowerCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
elif titles is None or texts is None:
lowercase_ = titles if texts is None else texts
return super().__call__(
lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = titles if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [titles]
lowercase_ = texts if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [texts]
lowercase_ = len(lowerCAmelCase_)
lowercase_ = questions if not isinstance(lowerCAmelCase_ , lowerCAmelCase_) else [questions] * n_passages
if len(lowerCAmelCase_) != len(lowerCAmelCase_):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCAmelCase_)} titles and {len(lowerCAmelCase_)} texts.''')
lowercase_ = super().__call__(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_)["""input_ids"""]
lowercase_ = super().__call__(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_)["""input_ids"""]
lowercase_ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase_ , lowerCAmelCase_)
]
}
if return_attention_mask is not False:
lowercase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
lowercase_ = attention_mask
return self.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] = 1_6 , lowerCAmelCase_ : Union[str, Any] = 6_4 , lowerCAmelCase_ : Any = 4 , ):
"""simple docstring"""
lowercase_ = reader_input["""input_ids"""]
lowercase_ , lowercase_ , lowercase_ = reader_output[:3]
lowercase_ = len(lowerCAmelCase_)
lowercase_ = sorted(range(lowerCAmelCase_) , reverse=lowerCAmelCase_ , key=relevance_logits.__getitem__)
lowercase_ = []
for doc_id in sorted_docs:
lowercase_ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
lowercase_ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase_ = sequence_ids.index(self.pad_token_id)
else:
lowercase_ = len(lowerCAmelCase_)
lowercase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase_ , top_spans=lowerCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase_ , start_index=lowerCAmelCase_ , end_index=lowerCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowerCAmelCase_) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
lowercase_ = []
for start_index, start_score in enumerate(lowerCAmelCase_):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
lowercase_ = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[1] , reverse=lowerCAmelCase_)
lowercase_ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
lowercase_ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCAmelCase_) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(snake_case_ )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["""input_ids""", """attention_mask"""]
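# Tiny illustration of the span scoring in `_get_best_spans` above (illustrative
# logits): each candidate answer span is scored as start_logit + end_logit, the
# candidates are sorted descending, and overlapping spans are then discarded.
_starts, _ends = [0.1, 2.0, 0.3], [0.0, 0.5, 1.5]
_scored = sorted(
    (((s, e), _starts[s] + _ends[e]) for s in range(3) for e in range(s, 3)),
    key=lambda item: item[1],
    reverse=True,
)
assert _scored[0] == ((1, 2), 3.5)  # best span covers tokens 1..2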
| 136 | """simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
SCREAMING_SNAKE_CASE__:Any = random.Random()
if is_torch_available():
import torch
def _lowerCamelCase( a , a=1.0 , a=None , a=None ):
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=1 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ):
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = feature_size
__a = padding_value
__a = sampling_rate
__a = return_attention_mask
__a = do_normalize
def a__ ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , lowerCamelCase=False , lowerCamelCase=False ):
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : str = ASTFeatureExtractor
def a__ ( self ):
__a = ASTFeatureExtractionTester(self )
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
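    # Note (added commentary, not in the original test): as asserted above, the
    # extractor downcasts padded outputs to float32 even when fed float64 inputs,
    # so downstream models always receive a consistent dtype.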
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
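# Hedged usage sketch (not part of the original test file): ASTFeatureExtractor
# maps a raw 1-D waveform to a (1, 1024, 128) log-mel grid, matching the shape
# asserted in the integration test above. The silence input here is illustrative.
def _example_ast_extraction():
    extractor = ASTFeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_values
    assert features.shape == (1, 1024, 128)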
| 261 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a LoRA .safetensors checkpoint into a diffusers StableDiffusionPipeline."""
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by walking attribute names, re-joining pieces
        # that were split on "_" when the attribute lookup fails
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair the lora_up / lora_down factors for this layer
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W' = W + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv layers store the LoRA factors as 4-D tensors; squeeze to 2-D for the matmul
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
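# Hedged usage sketch (model path and file names below are placeholders, not
# from the source): merge a LoRA checkpoint into a base pipeline and save it.
def _example_merge_lora():
    pipe = convert(
        "runwayml/stable-diffusion-v1-5",  # base model in diffusers format
        "./lora_weights.safetensors",      # LoRA checkpoint to merge
        "lora_unet",
        "lora_te",
        alpha=0.75,
    )
    pipe.save_pretrained("./sd15_with_lora")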
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 331 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize CLIP image embeddings."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
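# Hedged usage sketch (not from the original file): scale() and unscale() are
# inverses, so a round trip recovers the original embeddings up to
# floating-point error.
def _example_normalizer_roundtrip():
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)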
| 261 | 0 |