code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_snake_case = False
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a=32):
'''simple docstring'''
set_seed(0)
_lowerCAmelCase : Optional[int] = UNetaDModel(sample_size=_A, in_channels=3, out_channels=3)
_lowerCAmelCase : Union[str, Any] = torch.optim.SGD(model.parameters(), lr=0.0_001)
return model, optimizer
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_lowerCAmelCase : int = DDPMScheduler(
num_train_timesteps=1000, beta_start=0.0_001, beta_end=0.02, beta_schedule="linear", clip_sample=_A, )
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000, beta_start=0.0_001, beta_end=0.02, beta_schedule="linear", clip_sample=_A, )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_lowerCAmelCase : Any = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(_A) for _ in range(4)]
_lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 32, 32)).to(_A) for _ in range(4)]
_lowerCAmelCase : List[str] = [torch.randint(0, 1000, (4,)).long().to(_A) for _ in range(4)]
# train with a DDPM scheduler
_lowerCAmelCase : Dict = self.get_model_optimizer(resolution=32)
model.train().to(_A)
for i in range(4):
optimizer.zero_grad()
_lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
_lowerCAmelCase : str = model(_A, timesteps[i]).sample
_lowerCAmelCase : Dict = torch.nn.functional.mse_loss(_A, noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_lowerCAmelCase : List[str] = self.get_model_optimizer(resolution=32)
model.train().to(_A)
for i in range(4):
optimizer.zero_grad()
_lowerCAmelCase : Any = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
_lowerCAmelCase : int = model(_A, timesteps[i]).sample
_lowerCAmelCase : Tuple = torch.nn.functional.mse_loss(_A, noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_A, _A, atol=1E-5))
self.assertTrue(torch.allclose(_A, _A, atol=1E-5))
| 350
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300
| 0
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=30, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = ViTMSNModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.type_sequence_label_size
_lowerCAmelCase : Tuple = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(__a, labels=__a)
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
print("Labels: {labels}")
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Tuple = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : int = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase):
lowerCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ViTMSNModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(__a)
_lowerCAmelCase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = ViTMSNModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(2)
_lowerCAmelCase : Optional[Any] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(__a)
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : str = prepare_img()
_lowerCAmelCase : str = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**__a)
# verify the logits
_lowerCAmelCase : List[str] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : int = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
| 351
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300
| 0
|
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = len(lowerCAmelCase__ )
while cur > 1:
# Find the maximum number in arr
_lowerCAmelCase : Dict = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_lowerCAmelCase : Tuple = arr[mi::-1] + arr[mi + 1 : len(lowerCAmelCase__ )]
# Reverse whole list
_lowerCAmelCase : Optional[int] = arr[cur - 1 :: -1] + arr[cur : len(lowerCAmelCase__ )]
cur -= 1
return arr
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = checkpoint
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = vae_state_dict["encoder.conv_in.weight"]
_lowerCAmelCase : Dict = vae_state_dict["encoder.conv_in.bias"]
_lowerCAmelCase : int = vae_state_dict["encoder.conv_out.weight"]
_lowerCAmelCase : str = vae_state_dict["encoder.conv_out.bias"]
_lowerCAmelCase : List[str] = vae_state_dict["encoder.norm_out.weight"]
_lowerCAmelCase : List[Any] = vae_state_dict["encoder.norm_out.bias"]
_lowerCAmelCase : List[Any] = vae_state_dict["decoder.conv_in.weight"]
_lowerCAmelCase : Union[str, Any] = vae_state_dict["decoder.conv_in.bias"]
_lowerCAmelCase : Optional[int] = vae_state_dict["decoder.conv_out.weight"]
_lowerCAmelCase : Union[str, Any] = vae_state_dict["decoder.conv_out.bias"]
_lowerCAmelCase : Optional[int] = vae_state_dict["decoder.norm_out.weight"]
_lowerCAmelCase : int = vae_state_dict["decoder.norm_out.bias"]
_lowerCAmelCase : Optional[Any] = vae_state_dict["quant_conv.weight"]
_lowerCAmelCase : Optional[int] = vae_state_dict["quant_conv.bias"]
_lowerCAmelCase : List[str] = vae_state_dict["post_quant_conv.weight"]
_lowerCAmelCase : List[str] = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
_lowerCAmelCase : Optional[Any] = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
_lowerCAmelCase : Optional[int] = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_lowerCAmelCase : str = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
_lowerCAmelCase : Optional[Any] = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(_lowerCamelCase )
}
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
_lowerCAmelCase : str = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
_lowerCAmelCase : Dict = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
_lowerCAmelCase : Tuple = renew_vae_resnet_paths(_lowerCamelCase )
_lowerCAmelCase : List[str] = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = [key for key in vae_state_dict if "encoder.mid.block" in key]
_lowerCAmelCase : List[str] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase : int = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
_lowerCAmelCase : List[str] = renew_vae_resnet_paths(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
_lowerCAmelCase : List[Any] = [key for key in vae_state_dict if "encoder.mid.attn" in key]
_lowerCAmelCase : Union[str, Any] = renew_vae_attention_paths(_lowerCamelCase )
_lowerCAmelCase : int = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
conv_attn_to_linear(_lowerCamelCase )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = num_up_blocks - 1 - i
_lowerCAmelCase : Optional[int] = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
_lowerCAmelCase : Any = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
_lowerCAmelCase : Any = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
_lowerCAmelCase : str = renew_vae_resnet_paths(_lowerCamelCase )
_lowerCAmelCase : Dict = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
_lowerCAmelCase : List[str] = [key for key in vae_state_dict if "decoder.mid.block" in key]
_lowerCAmelCase : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase : Optional[int] = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
_lowerCAmelCase : int = renew_vae_resnet_paths(_lowerCamelCase )
_lowerCAmelCase : List[str] = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
_lowerCAmelCase : List[Any] = [key for key in vae_state_dict if "decoder.mid.attn" in key]
_lowerCAmelCase : Optional[Any] = renew_vae_attention_paths(_lowerCamelCase )
_lowerCAmelCase : Any = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
conv_attn_to_linear(_lowerCamelCase )
return new_checkpoint
def A ( _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
_lowerCAmelCase : Union[str, Any] = io.BytesIO(r.content )
_lowerCAmelCase : List[Any] = OmegaConf.load(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = 512
_lowerCAmelCase : str = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
_lowerCAmelCase : Dict = {}
with safe_open(_lowerCamelCase , framework="pt" , device="cpu" ) as f:
for key in f.keys():
_lowerCAmelCase : Optional[int] = f.get_tensor(_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )["state_dict"]
# Convert the VAE model.
_lowerCAmelCase : Optional[Any] = create_vae_diffusers_config(_lowerCamelCase , image_size=_lowerCamelCase )
_lowerCAmelCase : List[str] = custom_convert_ldm_vae_checkpoint(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = AutoencoderKL(**_lowerCamelCase )
vae.load_state_dict(_lowerCamelCase )
vae.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
_snake_case = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300
| 0
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case = False, False, False
@dataclass
class UpperCAmelCase_ :
    """Dataset feature for audio: stores examples as Arrow structs of
    {"bytes": binary, "path": string} and decodes them back into
    {"path", "array", "sampling_rate"} dicts via soundfile/librosa.

    NOTE(review): this block appears machine-scrambled relative to its upstream
    source. Method bodies read names that are never bound (`a__`, `buffer`,
    `path`, `file`, `array`, `bytes_array`, `path_array`), every method shares
    the name `snake_case__` (later defs shadow earlier ones), and one signature
    repeats the parameter `__a`, which is a SyntaxError. Code is left
    byte-identical on purpose; reconcile with upstream before use.
    """
    # Configuration attributes (scrambled names; presumably sampling_rate, mono,
    # decode, id upstream — TODO confirm).
    lowerCamelCase__ = None
    lowerCamelCase__ = True
    lowerCamelCase__ = True
    lowerCamelCase__ = None
    # Automatically constructed
    lowerCamelCase__ = "dict"
    lowerCamelCase__ = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    lowerCamelCase__ = field(default='Audio' , init=lowercase_ , repr=lowercase_)
    def __call__( self):
        """Return the Arrow storage type of this feature."""
        return self.pa_type
    def snake_case__ ( self, __a):
        """Encode one audio example into a {"bytes", "path"} dict.

        Branches accept a path string, raw bytes, or a dict carrying an
        "array"/"sampling_rate" pair, a local "path" (including raw .pcm), or
        pre-existing "bytes". NOTE(review): the isinstance checks and sf.write
        targets use unbound names (`a__`, `buffer`) — scrambled.
        """
        try:
            import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(a__, a__):
            return {"bytes": None, "path": value}
        elif isinstance(a__, a__):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            _lowerCAmelCase : Optional[Any] = BytesIO()
            sf.write(a__, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    # NOTE(review): np.intaa / np.floataa look like scrambled
                    # np.int16 / np.float32 (16-bit PCM normalized by 32767) — verify.
                    _lowerCAmelCase : Optional[int] = np.frombuffer(value["bytes"], dtype=np.intaa).astype(np.floataa) / 3_2767
                else:
                    _lowerCAmelCase : Optional[int] = np.memmap(value["path"], dtype="h", mode="r").astype(np.floataa) / 3_2767
                _lowerCAmelCase : Tuple = BytesIO(bytes())
                sf.write(a__, a__, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.")
    def snake_case__ ( self, __a, __a = None):
        """Decode a stored {"bytes", "path"} dict into
        {"path", "array", "sampling_rate"}, optionally downmixing to mono and
        resampling to self.sampling_rate.

        NOTE(review): the signature declares `__a` twice — a SyntaxError;
        upstream presumably takes (value, token_per_repo_id=None).
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        _lowerCAmelCase , _lowerCAmelCase : int = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        # File extension drives the opus/mp3 capability checks below.
        _lowerCAmelCase : Any = xsplitext(a__)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        if file is None:
            # Streaming case: resolve a per-repo auth token from the URL, then open remotely.
            _lowerCAmelCase : List[Any] = token_per_repo_id or {}
            _lowerCAmelCase : Dict = path.split("::")[-1]
            try:
                _lowerCAmelCase : List[Any] = string_to_dict(a__, config.HUB_DATASETS_URL)["repo_id"]
                _lowerCAmelCase : Union[str, Any] = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                _lowerCAmelCase : Union[str, Any] = None
            with xopen(a__, "rb", use_auth_token=a__) as f:
                _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = sf.read(a__)
        else:
            _lowerCAmelCase , _lowerCAmelCase : Dict = sf.read(a__)
        # soundfile returns (frames, channels); transpose before librosa processing.
        _lowerCAmelCase : List[str] = array.T
        if self.mono:
            _lowerCAmelCase : str = librosa.to_mono(a__)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            _lowerCAmelCase : Optional[Any] = librosa.resample(a__, orig_sr=a__, target_sr=self.sampling_rate)
            _lowerCAmelCase : Any = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def snake_case__ ( self):
        """Return the flattened (non-decoded) schema: raw bytes plus path."""
        from .features import Value
        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def snake_case__ ( self, __a):
        """Cast arbitrary Arrow storage (string paths, raw binary, or structs)
        to this feature's {"bytes", "path"} struct type."""
        if pa.types.is_string(storage.type):
            _lowerCAmelCase : Optional[Any] = pa.array([None] * len(a__), type=pa.binary())
            _lowerCAmelCase : Dict = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            _lowerCAmelCase : int = pa.array([None] * len(a__), type=pa.string())
            _lowerCAmelCase : str = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Structs carrying a decoded "array" get re-encoded example by example.
            _lowerCAmelCase : Union[str, Any] = pa.array([Audio().encode_example(a__) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                _lowerCAmelCase : str = storage.field("bytes")
            else:
                _lowerCAmelCase : Any = pa.array([None] * len(a__), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                _lowerCAmelCase : str = storage.field("path")
            else:
                _lowerCAmelCase : Union[str, Any] = pa.array([None] * len(a__), type=pa.string())
            _lowerCAmelCase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(a__, self.pa_type)
    def snake_case__ ( self, __a):
        """Embed external audio files into storage: read each path's bytes into
        the "bytes" column and keep only the basename in "path"."""
        @no_op_if_value_is_null
        def path_to_bytes(__a):
            # Works on streamed/remote paths via xopen.
            with xopen(a__, "rb") as f:
                _lowerCAmelCase : Dict = f.read()
            return bytes_
        _lowerCAmelCase : Tuple = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        _lowerCAmelCase : Tuple = pa.array(
            [os.path.basename(a__) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        _lowerCAmelCase : int = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(a__, self.pa_type)
| 354
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
    """Output container for the SDE-VE scheduler's predictor step.

    Fix: the original declared both fields under the same scrambled name
    `lowerCamelCase__`, so the second assignment silently shadowed the first.
    Field names are restored from the construction site in `step_pred`
    (`prev_sample=..., prev_sample_mean=...`).
    """
    # Sample after one reverse-SDE predictor step, and its pre-noise mean.
    prev_sample: torch.FloatTensor = 42
    prev_sample_mean: torch.FloatTensor = 42
class UpperCAmelCase_ ( a , a):
    """Variance-exploding SDE scheduler (ScoreSdeVe-style): predictor/corrector
    sampling over a geometric sigma schedule.

    NOTE(review): this block appears machine-scrambled. The class lists the
    same base `a` twice (TypeError at class creation), several signatures
    repeat the parameter name `__a` (SyntaxError), and many statements bind a
    throwaway `_lowerCAmelCase` where later code reads real names
    (`self.timesteps`, `sigma`, `drift`, `noise`, ...). Left byte-identical;
    reconcile with upstream before use.
    """
    # Presumably the scheduler `order` attribute — TODO confirm.
    lowerCamelCase__ = 1
    @register_to_config
    def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
        """Store the initial noise sigma and build the sigma schedule.

        NOTE(review): all six parameters are named `__a` (SyntaxError) and the
        body reads undefined `sigma_max`; the two assignments never actually
        bind `self.init_noise_sigma` / `self.timesteps`.
        """
        _lowerCAmelCase : Dict = sigma_max
        # setable values
        _lowerCAmelCase : str = None
        self.set_sigmas(__a, __a, __a, __a)
    def snake_case__ ( self, __a, __a = None):
        """Intended as identity input scaling (VE-SDE needs none).

        NOTE(review): duplicate `__a` params, and `sample` is unbound here.
        """
        return sample
    def snake_case__ ( self, __a, __a = None, __a = None):
        """Build the linearly spaced continuous timesteps in (eps, 1].

        NOTE(review): duplicate `__a` params; the linspace result is bound to a
        throwaway name instead of `self.timesteps`.
        """
        _lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
    def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
        """Build the geometric sigma schedule (continuous and discrete).

        NOTE(review): duplicate `__a` params; `sigma_min`/`sigma_max` are read
        before being bound and results go to throwaway names.
        """
        _lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__a, __a)
        _lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
        _lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def snake_case__ ( self, __a, __a):
        """Return the previous discrete sigma for each timestep (zero at t=0).

        NOTE(review): body reads unbound `timesteps` / `t` — params are `__a`.
        """
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
        """Reverse-SDE predictor step: drift from the score model, diffusion
        noise scaled by sqrt(sigma_t^2 - sigma_{t-1}^2).

        NOTE(review): duplicate `__a` params; most intermediates are bound to
        throwaway names while the math reads `timestep`, `sample`, `sigma`,
        `drift`, `diffusion`, `noise`, `prev_sample_mean`.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        _lowerCAmelCase : Dict = timestep * torch.ones(
            sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
        _lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
        _lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
        _lowerCAmelCase : List[str] = torch.zeros_like(__a)
        _lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCAmelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            _lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
        _lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCAmelCase : Optional[Any] = randn_tensor(
            sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
        _lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
    def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
        """Langevin corrector step: step size from the signal-to-noise ratio of
        model output vs. fresh noise.

        NOTE(review): duplicate `__a` params and throwaway bindings, as above.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        _lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCAmelCase : List[Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            _lowerCAmelCase : int = step_size.unsqueeze(-1)
        _lowerCAmelCase : List[Any] = sample + step_size * model_output
        _lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__a)
    def snake_case__ ( self, __a, __a, __a, ):
        """Add sigma-scaled noise to clean samples at the given timesteps.

        NOTE(review): reads unbound `timesteps` / `original_samples` / `noise`
        — the three parameters are all named `__a`.
        """
        _lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
        _lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
        _lowerCAmelCase : Any = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__a) * sigmas[:, None, None, None]
        )
        _lowerCAmelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self):
        """Number of diffusion steps the scheduler was configured with."""
        return self.config.num_train_timesteps
| 300
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
    """Fast AltDiffusion pipeline tests on tiny components (CPU).

    NOTE(review): machine-scrambled — method bodies reference `UpperCamelCase_`
    (undefined; the bases are spelled `UpperCamelCase__`), `get_dummy_inputs`
    declares `__a` twice (SyntaxError), and most results are bound to a
    throwaway `_lowerCAmelCase` while later code reads real names
    (`unet`, `scheduler`, `vae`, `alt_pipe`, `output`, `image`, ...).
    Left byte-identical; reconcile with upstream before use.
    """
    lowerCamelCase__ = AltDiffusionPipeline
    lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    def snake_case__ ( self):
        """Build the tiny UNet/DDIM/VAE/CLIP components dict used by the fast tests."""
        torch.manual_seed(0)
        _lowerCAmelCase : Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        _lowerCAmelCase : int = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=UpperCamelCase_, set_alpha_to_one=UpperCamelCase_, )
        torch.manual_seed(0)
        _lowerCAmelCase : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        _lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        _lowerCAmelCase : Union[str, Any] = CLIPTextModel(UpperCamelCase_)
        _lowerCAmelCase : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        _lowerCAmelCase : List[str] = 77
        _lowerCAmelCase : List[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def snake_case__ ( self, __a, __a=0):
        """Build the standard prompt/generator kwargs for a pipeline call.

        NOTE(review): duplicate `__a` parameters — a SyntaxError; upstream
        presumably takes (device, seed=0).
        """
        if str(UpperCamelCase_).startswith("mps"):
            _lowerCAmelCase : Union[str, Any] = torch.manual_seed(UpperCamelCase_)
        else:
            _lowerCAmelCase : int = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
        _lowerCAmelCase : Union[str, Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def snake_case__ ( self):
        """Attention-slicing equivalence with a relaxed tolerance."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)
    def snake_case__ ( self):
        """Batch-vs-single equivalence with a relaxed tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
    def snake_case__ ( self):
        """End-to-end fast test with the DDIM scheduler and a deterministic
        Roberta-series text encoder; checks a 3x3 output slice."""
        _lowerCAmelCase : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Optional[int] = self.get_dummy_components()
        torch.manual_seed(0)
        _lowerCAmelCase : Tuple = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        _lowerCAmelCase : int = RobertaSeriesModelWithTransformation(UpperCamelCase_)
        _lowerCAmelCase : Tuple = text_encoder
        _lowerCAmelCase : Any = AltDiffusionPipeline(**UpperCamelCase_)
        _lowerCAmelCase : Any = alt_pipe.to(UpperCamelCase_)
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_)
        _lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_)
        _lowerCAmelCase : Dict = "A photo of an astronaut"
        _lowerCAmelCase : str = alt_pipe(**UpperCamelCase_)
        _lowerCAmelCase : int = output.images
        _lowerCAmelCase : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Dict = np.array(
            [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def snake_case__ ( self):
        """Same end-to-end fast test but with the PNDM scheduler."""
        _lowerCAmelCase : List[str] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : str = self.get_dummy_components()
        _lowerCAmelCase : Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase_)
        torch.manual_seed(0)
        _lowerCAmelCase : List[str] = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        _lowerCAmelCase : str = RobertaSeriesModelWithTransformation(UpperCamelCase_)
        _lowerCAmelCase : Optional[Any] = text_encoder
        _lowerCAmelCase : List[str] = AltDiffusionPipeline(**UpperCamelCase_)
        _lowerCAmelCase : List[Any] = alt_pipe.to(UpperCamelCase_)
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_)
        _lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_)
        _lowerCAmelCase : str = alt_pipe(**UpperCamelCase_)
        _lowerCAmelCase : Dict = output.images
        _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : int = np.array(
            [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow GPU tests against the full BAAI/AltDiffusion checkpoint.

    NOTE(review): machine-scrambled — bodies reference undefined
    `UpperCamelCase_`, and pipelines/outputs are bound to a throwaway
    `_lowerCAmelCase` while later code reads `alt_pipe` / `prompt` /
    `output` / `image`. All methods share the name `snake_case__`, so the
    first (teardown-like) method is shadowed and never runs as tearDown.
    Left byte-identical; reconcile with upstream before use.
    """
    def snake_case__ ( self):
        """Teardown-style cleanup: free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case__ ( self):
        """Full-checkpoint generation with the default scheduler; checks a 3x3 slice."""
        _lowerCAmelCase : Optional[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=UpperCamelCase_)
        _lowerCAmelCase : List[str] = alt_pipe.to(UpperCamelCase_)
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_)
        _lowerCAmelCase : List[Any] = "A painting of a squirrel eating a burger"
        _lowerCAmelCase : Optional[Any] = torch.manual_seed(0)
        _lowerCAmelCase : Optional[int] = alt_pipe([prompt], generator=UpperCamelCase_, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        _lowerCAmelCase : str = output.images
        _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCAmelCase : Optional[int] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def snake_case__ ( self):
        """Full-checkpoint generation with an explicit DDIM scheduler (fast, 2 steps)."""
        _lowerCAmelCase : Any = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        _lowerCAmelCase : Dict = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=UpperCamelCase_, safety_checker=UpperCamelCase_)
        _lowerCAmelCase : Union[str, Any] = alt_pipe.to(UpperCamelCase_)
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_)
        _lowerCAmelCase : List[str] = "A painting of a squirrel eating a burger"
        _lowerCAmelCase : Tuple = torch.manual_seed(0)
        _lowerCAmelCase : str = alt_pipe([prompt], generator=UpperCamelCase_, num_inference_steps=2, output_type="numpy")
        _lowerCAmelCase : Optional[int] = output.images
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCAmelCase : List[str] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 355
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
    """Generate a cryptographically strong random password.

    Fix: the character pool was bound to a throwaway name and
    `secrets.choice` was called on the integer length instead of the pool.

    _lowerCamelCase: desired password length (default 8).
    Returns a string drawn from letters, digits and punctuation.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(_lowerCamelCase))
def A ( chars_incl , i ):
    """Build a length-i password guaranteed to contain all of chars_incl,
    padded with random letters, digits and punctuation, then shuffled.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and bound every intermediate to a throwaway name; the
    pool arguments are restored from the in-body comment below.
    NOTE(review): `random` is expected to be the length-bounded picker
    defined later in this file — verify the name resolves at runtime.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    chars_list = list(chars)
    shuffle(chars_list)
    return "".join(chars_list)
# random is a generalised function for letters, characters and numbers
def A ( ctbi , i ):
    """Return i characters drawn uniformly (crypto-strong) from the pool ctbi.

    Fix: the original declared both parameters with the same name (a
    SyntaxError) and confused the pool with the count.
    """
    return "".join(secrets.choice(ctbi) for _ in range(i))
def A ( ctbi , i ):
    """Placeholder (presumably random_letters(ctbi, i) upstream — TODO confirm).

    Fix: the original declared both parameters with the same name, a SyntaxError.
    """
    pass  # Put your code here...
def A ( ctbi , i ):
    """Placeholder (presumably random_number(ctbi, i) upstream — TODO confirm).

    Fix: the original declared both parameters with the same name, a SyntaxError.
    """
    pass  # Put your code here...
def A ( ctbi , i ):
    """Placeholder (presumably random_characters(ctbi, i) upstream — TODO confirm).

    Fix: the original declared both parameters with the same name, a SyntaxError.
    """
    pass  # Put your code here...
def A ( password , min_length = 8 ):
    """Return True iff password meets the strength policy: at least min_length
    characters and at least one uppercase letter, one lowercase letter, one
    digit and one punctuation character.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and bound all four class-membership flags to a throwaway
    name, leaving `upper`/`lower`/`num`/`spec_char` undefined at the return.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
def A ( ):
    '''Interactive driver: ask for a length and required characters, then print
    two generated passwords.

    NOTE(review): this obfuscated copy is broken as written — it calls
    `password_generator`, `alternative_password_generator` and `main`, but the
    surrounding defs were renamed to `A`; the values read from `input()` are
    bound to `_lowerCAmelCase` while `_lowerCamelCase` (undefined) is passed
    on. Restore the original names to make it runnable.
    '''
    # max password length requested by the user
    _lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
    # characters that must appear in the generated password
    _lowerCAmelCase : Tuple = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(_lowerCamelCase ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
    print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
    main()
| 300
| 0
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate and write the README.md model card for one allenai wmt16 FSMT model.

    Args:
        model_card_dir: pathlib.Path of the directory to create and write into.
        src_lang / tgt_lang: language pair codes (e.g. "en", "de").
        model_name: one of the keys of the `scores` table below.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Generate one card per released allenai wmt16 en-de checkpoint.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 356
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the ConvNext model family: submodules are only
# imported when their optional dependency (vision / torch / tf) is installed.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
# onnxruntime is an optional dependency; only import it when it is installed.
if is_onnx_available():
    import onnxruntime as ort
# NOTE(review): `logging` here is the diffusers logging helper imported above
# (it provides `get_logger`), not the stdlib `logging` module.
_snake_case = logging.get_logger(__name__)
_snake_case = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with hub save/load helpers.

    The obfuscated original had duplicate `__a` parameter names (a SyntaxError),
    undefined `_A` placeholders, and all five helpers renamed to the same
    `snake_case__` identifier even though the bodies call them by their real
    names (`load_model`, `_save_pretrained`, `_from_pretrained`, ...); those
    names are restored here.
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects numpy inputs; first run() arg None = all outputs.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an InferenceSession for `path`, defaulting to CPU execution."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the latest model file (and external weights, if any) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Save the model files to `save_directory` (must not be an existing file)."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory, or download the weights from the hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        """Public entry point; supports `repo@revision` style ids."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )


# Preserve the obfuscated class name so any external references keep working.
UpperCAmelCase_ = OnnxRuntimeModel
| 357
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# The function body reads these constants by name; the obfuscated copy assigned
# both values to the same throwaway variable, which made them undefined.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def A(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for the missing quantity in the Casimir force equation.

    F = (pi^2 * hbar * c * area) / (240 * distance^4)

    Exactly one of *force*, *area*, *distance* must be 0; that quantity is
    computed from the other two and returned in a one-entry dict.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 300
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode raw audio bytes with ffmpeg into a mono float32 numpy array.

    The payload is piped through ffmpeg, downmixed to one channel, resampled to
    `sampling_rate` and converted to 32-bit little-endian floats.

    Raises:
        ValueError: if ffmpeg is not installed, or decoding produced no samples.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield raw microphone audio chunks captured through ffmpeg.

    Picks the platform capture backend (alsa / avfoundation / dshow) and yields
    byte chunks of `chunk_length_s` seconds in the requested sample format.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[float] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as numpy arrays, suitable for streaming ASR.

    Chunks are `chunk_length_s` seconds long with left/right strides of
    `stride_length_s` (default: chunk_length_s / 6 on each side). Items that
    arrive too late (more than 10 chunk durations behind) are skipped.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into overlapping windows of `chunk_len` bytes.

    Each yielded dict has `raw` (the bytes) and `stride` (left, right overlap in
    bytes). With `stream=True`, incomplete accumulations are yielded early with
    `partial=True`. The first chunk always has a left stride of 0, and the last
    (shorter) chunk is yielded when it exceeds the left stride.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # no left context before the very first chunk
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = 2**24 # 16Mo
try:
with subprocess.Popen(_A , stdout=subprocess.PIPE , bufsize=_A ) as ffmpeg_process:
while True:
_lowerCAmelCase : Dict = ffmpeg_process.stdout.read(_A )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 358
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
# Arguments controlling which wav2vec2 checkpoint is pretrained and how.
# NOTE(review): the obfuscated copy renamed every field to the same
# `lowerCamelCase__` identifier and replaced several defaults with the
# undefined name `a`; the metadata help strings still identify each field
# (model_name_or_path, cache_dir, freeze_feature_extractor, verbose_logging,
# max/min_gumbel_temperature, gumbel_temperature_decay). Restore real field
# names and annotations before use — as written this dataclass has no fields.
@dataclass
class UpperCAmelCase_ :
    lowerCamelCase__ = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
    lowerCamelCase__ = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger(model_args, training_args):
    """Configure root logging and set this script's logger verbosity.

    DEBUG when `model_args.verbose_logging` is set, INFO on the main process,
    WARNING otherwise.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
# Arguments describing the dataset used for pretraining.
# NOTE(review): as in ModelArguments above, the field names were obfuscated to
# one repeated identifier and some defaults replaced with the undefined `a`;
# the help strings identify dataset_name, dataset_config_name,
# train_split_name, validation_split_name, speech_file_column,
# overwrite_cache, validation_split_percentage, preprocessing_num_workers and
# max_duration_in_seconds. Restore real names/annotations before use.
@dataclass
class UpperCAmelCase_ :
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    lowerCamelCase__ = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    lowerCamelCase__ = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    lowerCamelCase__ = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    lowerCamelCase__ = field(
        default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
# Data collator that pads audio features and samples the time-step mask used
# for wav2vec2 contrastive pretraining.
# NOTE(review): the obfuscation destroyed this class — the five fields (model,
# feature_extractor, padding="longest", pad_to_multiple_of=None,
# max_length=None) all share one name, and every assignment target inside
# __call__ was replaced by a throwaway `_lowerCAmelCase` even though later
# lines read `batch`, `batch_size`, `mask_indices_seq_length`,
# `output_lengths` and `attention_mask`. Left byte-identical; restore the
# original assignment targets before use.
@dataclass
class UpperCAmelCase_ :
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = "longest"
    lowerCamelCase__ = None
    lowerCamelCase__ = None
    def __call__( self, __a):
        '''Pad a list of features to a batch and attach randomly sampled mask indices.'''
        # pad to a rectangular batch of input_values (+ attention_mask)
        _lowerCAmelCase : Any = self.feature_extractor.pad(
            __a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # length of the feature-encoder output for the padded length
        _lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        _lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            _lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            _lowerCAmelCase : Dict = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            _lowerCAmelCase : List[str] = 1
            _lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        _lowerCAmelCase : Optional[Any] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass for wav2vec2 pretraining that decays the gumbel-softmax
    temperature after every update step.

    Restored from the obfuscated copy: the __init__ keyword arguments all
    shared one name (a SyntaxError), the instance attributes read later
    (`num_update_step`, `max_gumbel_temp`, ...) were never set, and the
    `training_step` override had been renamed away. The name matches the call
    site (`WavaVecaPreTrainer(...)`) in `main` below.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """One optimisation step: forward, (scaled) backward, temperature decay."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    """Entry point: parse args, load and preprocess the audio dataset, then pretrain.

    Restored from the obfuscated copy, whose assignment targets were all
    replaced by a throwaway name even though later lines read `model_args`,
    `datasets`, `feature_extractor`, etc.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 300
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=16, __a=36, __a=6, __a=6, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : str = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Dict = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : str = embedding_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : Any = num_hidden_groups
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Union[str, Any] = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = AlbertModel(config=A__)
model.to(A__)
model.eval()
_lowerCAmelCase : str = model(A__, attention_mask=A__, token_type_ids=A__)
_lowerCAmelCase : Tuple = model(A__, token_type_ids=A__)
_lowerCAmelCase : Optional[Any] = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = AlbertForPreTraining(config=A__)
model.to(A__)
model.eval()
_lowerCAmelCase : Optional[Any] = model(
A__, attention_mask=A__, token_type_ids=A__, labels=A__, sentence_order_label=A__, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Shape-check AlbertForMaskedLM's vocabulary logits.'''
        # NOTE(review): duplicate `__a` parameters and unbound `A__`/`model`/
        # `result` — automated-renaming damage; verify upstream.
        _lowerCAmelCase : Dict = AlbertForMaskedLM(config=A__)
        model.to(A__)
        model.eval()
        _lowerCAmelCase : Optional[int] = model(A__, attention_mask=A__, token_type_ids=A__, labels=A__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Shape-check AlbertForQuestionAnswering's start/end logits.'''
        # NOTE(review): duplicate `__a` parameters and unbound `A__`/`model`/
        # `result` — automated-renaming damage; verify upstream.
        _lowerCAmelCase : Optional[int] = AlbertForQuestionAnswering(config=A__)
        model.to(A__)
        model.eval()
        _lowerCAmelCase : Any = model(
            A__, attention_mask=A__, token_type_ids=A__, start_positions=A__, end_positions=A__, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Shape-check AlbertForSequenceClassification's logits.'''
        # NOTE(review): duplicate `__a` parameters and unbound `A__`/`model`/
        # `result` — automated-renaming damage; verify upstream.
        _lowerCAmelCase : Optional[Any] = self.num_labels
        _lowerCAmelCase : Optional[Any] = AlbertForSequenceClassification(A__)
        model.to(A__)
        model.eval()
        _lowerCAmelCase : List[str] = model(A__, attention_mask=A__, token_type_ids=A__, labels=A__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Shape-check AlbertForTokenClassification's per-token logits.'''
        # NOTE(review): duplicate `__a` parameters and unbound `A__`/`model`/
        # `result` — automated-renaming damage; verify upstream.
        _lowerCAmelCase : Any = self.num_labels
        _lowerCAmelCase : Optional[Any] = AlbertForTokenClassification(config=A__)
        model.to(A__)
        model.eval()
        _lowerCAmelCase : str = model(A__, attention_mask=A__, token_type_ids=A__, labels=A__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Shape-check AlbertForMultipleChoice after tiling inputs per choice.'''
        # NOTE(review): duplicate `__a` parameters; the body also reads the
        # unbound names `A__`, `input_ids`, `token_type_ids`, `input_mask`,
        # `model` and `result` — automated-renaming damage; verify upstream.
        _lowerCAmelCase : Union[str, Any] = self.num_choices
        _lowerCAmelCase : Dict = AlbertForMultipleChoice(config=A__)
        model.to(A__)
        model.eval()
        # Tile each (batch, seq) tensor to (batch, num_choices, seq) for the MC head.
        _lowerCAmelCase : Any = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        _lowerCAmelCase : Optional[int] = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        _lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        _lowerCAmelCase : int = model(
            A__, attention_mask=A__, token_type_ids=A__, labels=A__, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def snake_case__ ( self):
        '''Return (config, inputs_dict) for the shared model-test harness.'''
        _lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): renaming damage — the unpack below binds every element
        # of the 7-tuple to the same name `_lowerCAmelCase` (and an annotated
        # tuple target is not valid Python), while the code afterwards reads
        # the unbound names `config_and_inputs`, `input_ids`, `token_type_ids`,
        # `input_mask`, `config` and `inputs_dict`. Upstream unpacks into
        # distinct variables; verify before relying on this.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) : Any = config_and_inputs
        _lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    """Common test-suite wiring for the ALBERT model family.

    NOTE(review): automated renaming damaged this class — both mixin bases
    were mangled to the unbound name `SCREAMING_SNAKE_CASE__`, every class
    attribute was renamed to `lowerCamelCase__` (so later assignments
    overwrite earlier ones), and method bodies read unbound names such as
    `A__`, `inputs_dict`, `model_class` and `return_labels`. Verify against
    the upstream transformers test module before trusting behavior.
    """
    # Model classes exercised by the common tests (empty when torch is absent).
    lowerCamelCase__ = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task name -> model class mapping for the pipeline tests.
    lowerCamelCase__ = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase__ = True
    def snake_case__ ( self, __a, __a, __a=False):
        '''Add dummy label tensors when the harness requests labels.'''
        # NOTE(review): duplicate `__a` parameters and unbound `A__`/
        # `return_labels`/`model_class`/`inputs_dict` — renaming damage.
        _lowerCAmelCase : Any = super()._prepare_for_class(A__, A__, return_labels=A__)
        if return_labels:
            if model_class in get_values(A__):
                # Zero-filled long tensors stand in for token- and sequence-level labels.
                _lowerCAmelCase : Union[str, Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=A__)
                _lowerCAmelCase : Optional[int] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=A__)
        return inputs_dict
    def snake_case__ ( self):
        '''Instantiate the model tester and the config tester.'''
        _lowerCAmelCase : Optional[Any] = AlbertModelTester(self)
        _lowerCAmelCase : int = ConfigTester(self, config_class=A__, hidden_size=37)
    def snake_case__ ( self):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def snake_case__ ( self):
        '''Test the base model forward pass.'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__)
    def snake_case__ ( self):
        '''Test the pretraining heads.'''
        _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*A__)
    def snake_case__ ( self):
        '''Test the masked-LM head.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A__)
    def snake_case__ ( self):
        '''Test the multiple-choice head.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A__)
    def snake_case__ ( self):
        '''Test the question-answering head.'''
        _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A__)
    def snake_case__ ( self):
        '''Test the sequence-classification head.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A__)
    def snake_case__ ( self):
        '''Run the base-model check under each position-embedding type.'''
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): `type` shadows the builtin, and the assignment
            # below discards the value — upstream presumably sets the
            # embedding type on the config object; verify.
            _lowerCAmelCase : Optional[Any] = type
            self.model_tester.create_and_check_model(*A__)
    @slow
    def snake_case__ ( self):
        '''Smoke-test loading a pretrained ALBERT checkpoint.'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : List[Any] = AlbertModel.from_pretrained(A__)
            self.assertIsNotNone(A__)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    """Integration test: compare albert-base-v2 outputs to reference values."""
    @slow
    def snake_case__ ( self):
        '''Forward a fixed input through albert-base-v2 and check a hidden-state slice.'''
        # NOTE(review): the body reads the unbound names `A__`, `model` and
        # `output` — automated-renaming damage; verify against upstream.
        _lowerCAmelCase : str = AlbertModel.from_pretrained("albert-base-v2")
        _lowerCAmelCase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        _lowerCAmelCase : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            _lowerCAmelCase : Optional[int] = model(A__, attention_mask=A__)[0]
        _lowerCAmelCase : Dict = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, A__)
        # Reference slice presumably precomputed from the released checkpoint.
        _lowerCAmelCase : Union[str, Any] = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], A__, atol=1E-4))
| 359
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
    '''Scrape Amazon.in search results for a product into a pandas DataFrame.

    NOTE(review): automated renaming damaged this function — the body reads
    many unbound names (`product`, `soup`, `item`, `product_title`,
    `product_link`, `product_price`, `product_rating`, `product_mrp`,
    `discount`, `data_frame`), `item.ha` is presumably `item.h2`, and the two
    bare `" "` assignments near the end discard values that upstream stored
    into the DataFrame row. The module-level `from bsa import BeautifulSoup`
    also looks like a typo for `bs4`. Verify against upstream before use.
    '''
    _lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
    # Browser-like headers so Amazon serves the regular HTML page.
    _lowerCAmelCase : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
    # Initialize a Pandas dataframe with the column titles
    _lowerCAmelCase : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            _lowerCAmelCase : Any = item.ha.text
            _lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
            _lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                _lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # No rating element for this listing.
                _lowerCAmelCase : str = "Not available"
            try:
                _lowerCAmelCase : Optional[Any] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # No MRP (strike-through price) shown for this listing.
                _lowerCAmelCase : Optional[Any] = ""
            try:
                # Discount percentage = (MRP - price) / MRP * 100.
                _lowerCAmelCase : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                _lowerCAmelCase : Optional[Any] = float("nan" )
        except AttributeError:
            # Entry without the expected markup — skip it silently (best-effort scrape).
            pass
        _lowerCAmelCase : Any = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _lowerCAmelCase : List[str] = " "
        _lowerCAmelCase : Tuple = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): renaming damage — `product` is unbound (the literal is
    # assigned to `_snake_case`) and `get_amazon_product_data` is not defined
    # under that name in this file (the scraper above was renamed to `A`).
    _snake_case = "headphones"
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
    '''Set a (possibly quantized) tensor on a module, handling bitsandbytes params.

    Resolves a dotted tensor name inside `module`, then replaces the parameter
    or buffer with a value moved to the target device, wrapping it in
    bitsandbytes `Int8Params`/`Params4bit` when the existing parameter is
    quantized.

    NOTE(review): automated renaming damaged this function — all five
    parameters share the name `_lowerCamelCase` (a duplicate-argument
    SyntaxError), both quantization flags are bound to the same name
    `is_abit` (upstream: is_4bit / is_8bit), and the body reads many unbound
    names (`tensor_name`, `splits`, `new_module`, `module`, `old_value`,
    `device`, `value`, `param`, `new_value`, `fpaa_statistics`, `is_buffer`,
    `is_abit_serializable`). Verify against upstream before use.
    '''
    if "." in tensor_name:
        # Walk the dotted path down to the module that owns the tensor.
        _lowerCAmelCase : Union[str, Any] = tensor_name.split("." )
        for split in splits[:-1]:
            _lowerCAmelCase : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase )
            if new_module is None:
                raise ValueError(F"{module} has no attribute {split}." )
            _lowerCAmelCase : Union[str, Any] = new_module
        _lowerCAmelCase : List[Any] = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
    _lowerCAmelCase : Tuple = tensor_name in module._buffers
    _lowerCAmelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
    # A meta-device tensor has no data, so a concrete `value` is mandatory.
    if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
        raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
    _lowerCAmelCase : Optional[Any] = False
    _lowerCAmelCase : List[Any] = False
    if is_buffer or not is_bitsandbytes_available():
        _lowerCAmelCase : str = False
        _lowerCAmelCase : str = False
    else:
        # Detect whether the existing parameter is a bnb 4-bit or 8-bit param.
        _lowerCAmelCase : Dict = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        _lowerCAmelCase : List[Any] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
    if is_abit or is_abit:
        _lowerCAmelCase : Dict = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                _lowerCAmelCase : str = old_value.to(_lowerCamelCase )
            elif isinstance(_lowerCamelCase , torch.Tensor ):
                _lowerCAmelCase : Union[str, Any] = value.to("cpu" )
                if value.dtype == torch.inta:
                    # int8 state dicts require bitsandbytes > 0.37.2 for serialization.
                    _lowerCAmelCase : Optional[int] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
                        "0.37.2" )
                    if not is_abit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
            else:
                _lowerCAmelCase : int = torch.tensor(_lowerCamelCase , device="cpu" )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , _lowerCamelCase ) and fpaa_statistics is None:
                _lowerCAmelCase : Any = new_value.T
            _lowerCAmelCase : Union[str, Any] = old_value.__dict__
            if is_abit:
                _lowerCAmelCase : Any = bnb.nn.IntaParams(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
            elif is_abit:
                _lowerCAmelCase : Union[str, Any] = bnb.nn.Paramsabit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase )
            _lowerCAmelCase : Tuple = new_value
            if fpaa_statistics is not None:
                # Attach precomputed fp16 statistics to the quantized weight.
                setattr(module.weight , "SCB" , fpaa_statistics.to(_lowerCamelCase ) )
    else:
        # Plain (non-quantized) path: move/convert the value and store it back.
        if value is None:
            _lowerCAmelCase : Optional[int] = old_value.to(_lowerCamelCase )
        elif isinstance(_lowerCamelCase , torch.Tensor ):
            _lowerCAmelCase : int = value.to(_lowerCamelCase )
        else:
            _lowerCAmelCase : List[str] = torch.tensor(_lowerCamelCase , device=_lowerCamelCase )
        if is_buffer:
            _lowerCAmelCase : List[str] = new_value
        else:
            _lowerCAmelCase : int = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad )
            _lowerCAmelCase : Union[str, Any] = new_value
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False ):
    '''Recursively replace eligible nn.Linear / Conv1D modules with bnb linear layers.

    NOTE(review): automated renaming damaged this function — all five
    parameters share the name `_lowerCamelCase` (duplicate-argument
    SyntaxError), and the body reads the unbound names `model`,
    `current_key_name`, `modules_to_not_convert`, `quantization_config` and
    `has_been_replaced`, plus `_replace_with_bnb_linear` which is not defined
    under that name in this file. Verify against upstream before use.
    '''
    for name, module in model.named_children():
        if current_key_name is None:
            _lowerCAmelCase : List[str] = []
        current_key_name.append(_lowerCamelCase )
        if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(_lowerCamelCase ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(_lowerCamelCase , _lowerCamelCase ):
                        # Conv1D stores weights transposed relative to nn.Linear.
                        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = module.weight.shape
                    else:
                        _lowerCAmelCase : Tuple = module.in_features
                        _lowerCAmelCase : Any = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        _lowerCAmelCase : List[Any] = bnb.nn.LinearabitLt(
                            _lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
                        _lowerCAmelCase : Dict = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            _lowerCAmelCase : str = bnb.nn.Linearabit(
                                _lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
                            _lowerCAmelCase : Optional[Any] = True
                    # Store the module class in case we need to transpose the weight later
                    _lowerCAmelCase : str = type(_lowerCamelCase )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(_lowerCamelCase )
        if len(list(module.children() ) ) > 0:
            # Recurse into submodules.
            _lowerCAmelCase , _lowerCAmelCase : int = _replace_with_bnb_linear(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
    '''Public wrapper: replace linear layers with bnb equivalents, warning if none found.

    NOTE(review): duplicate `_lowerCamelCase` parameters (SyntaxError) and
    unbound names `modules_to_not_convert`, `_replace_with_bnb_linear`,
    `has_been_replaced`, `model` — automated-renaming damage; verify upstream.
    '''
    # `lm_head` is kept in full precision by default for generation quality.
    _lowerCAmelCase : Optional[Any] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    _lowerCAmelCase , _lowerCAmelCase : List[Any] = _replace_with_bnb_linear(
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def A ( *_lowerCamelCase , **_lowerCamelCase ):
    '''Deprecated alias kept for backward compatibility.

    NOTE(review): `*` and `**` parameters share the name `_lowerCamelCase`
    (SyntaxError), the warning category argument was mangled (upstream:
    FutureWarning), and `replace_with_bnb_linear` is not defined under that
    name in this file — automated-renaming damage; verify upstream.
    '''
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowerCamelCase , )
    return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase )
def A ( *_lowerCamelCase , **_lowerCamelCase ):
    '''Deprecated alias kept for backward compatibility.

    NOTE(review): `*` and `**` parameters share the name `_lowerCamelCase`
    (SyntaxError), the warning category argument was mangled (upstream:
    FutureWarning), and `set_module_quantized_tensor_to_device` is not defined
    under that name in this file — automated-renaming damage; verify upstream.
    '''
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowerCamelCase , )
    return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase )
def A ( _lowerCamelCase ):
    '''Return module names to keep in full precision (tied weights + output head).

    NOTE(review): automated renaming damaged this function — the body reads
    the unbound names `tied_model`, `tied_params`, `has_tied_params`,
    `is_base_model`, `model`, `list_modules`, `list_untouched`,
    `names_to_remove`, `filtered_module_names`, and several isinstance/set
    calls received the parameter name twice; verify against upstream.
    '''
    _lowerCAmelCase : int = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    _lowerCAmelCase : List[Any] = find_tied_parameters(_lowerCamelCase )
    # For compatibility with Accelerate < 0.18
    if isinstance(_lowerCamelCase , _lowerCamelCase ):
        _lowerCAmelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        _lowerCAmelCase : Union[str, Any] = sum(_lowerCamelCase , [] )
    _lowerCAmelCase : List[Any] = len(_lowerCamelCase ) > 0
    # Check if it is a base model
    _lowerCAmelCase : Optional[int] = not hasattr(_lowerCamelCase , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    _lowerCAmelCase : Tuple = list(model.named_children() )
    _lowerCAmelCase : List[Any] = [list_modules[-1][0]]
    # add last module together with tied weights
    _lowerCAmelCase : Any = set(_lowerCamelCase ) - set(_lowerCamelCase )
    _lowerCAmelCase : List[str] = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
    # remove ".weight" from the keys
    _lowerCAmelCase : Tuple = [".weight", ".bias"]
    _lowerCAmelCase : List[str] = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                _lowerCAmelCase : str = name.replace(_lowerCamelCase , "" )
        filtered_module_names.append(_lowerCamelCase )
    return filtered_module_names
| 360
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    '''Export a PyTorch BERT state dict to a TensorFlow 1.x checkpoint.

    Maps parameter names to the TF naming scheme, transposes weight matrices
    that TF stores as kernels, and saves a `.ckpt` under the target directory.

    NOTE(review): automated renaming damaged this function — the three
    parameters (and those of both nested helpers) share the name
    `_lowerCamelCase` (duplicate-argument SyntaxError), and the body reads
    the unbound names `model`, `name`, `tensor`, `session`, `tf_var`,
    `state_dict`, `torch_tensor`, `tf_name`, `saver`, `model_name` and
    `ckpt_dir`. Uses the TF1 graph/session API. Verify against upstream.
    '''
    # Parameters whose matrices must be transposed for TF's kernel layout.
    _lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (PyTorch substring, TF replacement) rewrite rules, applied in order.
    _lowerCAmelCase : Tuple = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(_lowerCamelCase ):
        os.makedirs(_lowerCamelCase )
    _lowerCAmelCase : Any = model.state_dict()
    def to_tf_var_name(_lowerCamelCase ):
        # Apply the rewrite rules and prefix with the `bert/` scope.
        for patt, repl in iter(_lowerCamelCase ):
            _lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase )
        return F"bert/{name}"
    def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
        # Create and initialize a zero TF variable matching the tensor's shape/dtype.
        _lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
        _lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(_lowerCamelCase )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            _lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase )
            _lowerCAmelCase : Any = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                _lowerCAmelCase : Tuple = torch_tensor.T
            _lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase )
            tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase )
            _lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase )
            # Round-trip check: the TF variable should now equal the torch tensor.
            print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" )
        _lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() )
        saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( _lowerCamelCase=None ):
    '''CLI entry point: parse args, load the PyTorch BERT model, export to TF.

    NOTE(review): automated renaming damaged this function — the body reads
    the unbound names `parser` and `args`, and calls
    `convert_pytorch_checkpoint_to_tf`, which is not defined under that name
    in this file (the converter above was renamed to `A`); verify upstream.
    '''
    _lowerCAmelCase : int = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" )
    _lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase )
    _lowerCAmelCase : List[Any] = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # NOTE(review): `main` is unbound — the entry point above was renamed to `A`.
    main()
| 300
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCAmelCase_ ( lowerCamelCase__):
    """Config sanity checks for SegformerConfig.

    NOTE(review): the base class was mangled to the unbound name
    `lowerCamelCase__` (presumably ConfigTester upstream) and the method body
    reads the unbound, name-mangled `__A` — automated-renaming damage.
    """
    def snake_case__ ( self):
        '''Verify the config exposes the Segformer-specific attributes.'''
        _lowerCAmelCase : Union[str, Any] = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(__A, "hidden_sizes"))
        self.parent.assertTrue(hasattr(__A, "num_attention_heads"))
        self.parent.assertTrue(hasattr(__A, "num_encoder_blocks"))
class UpperCAmelCase_ :
    """Builds tiny Segformer configs/inputs and shape-checks model outputs.

    NOTE(review): automated renaming damaged this class — ``__init__``
    repeats the parameter name `__a` (a duplicate-argument SyntaxError) and
    method bodies read the unbound, name-mangled `__A` plus `config`,
    `pixel_values`, `labels`, `model`, `result`, `expected_height`,
    `expected_width` and `config_and_inputs`. Verify against upstream.
    """
    def __init__( self, __a, __a=13, __a=64, __a=3, __a=4, __a=[2, 2, 2, 2], __a=[8, 4, 2, 1], __a=[16, 32, 64, 128], __a=[1, 4, 8, 16], __a=[1, 2, 4, 8], __a=True, __a=True, __a="gelu", __a=0.1, __a=0.1, __a=0.02, __a=3, __a=None, ):
        '''Store the tiny-model hyperparameters used by every check below.'''
        _lowerCAmelCase : Optional[int] = parent
        _lowerCAmelCase : int = batch_size
        _lowerCAmelCase : List[Any] = image_size
        _lowerCAmelCase : List[str] = num_channels
        _lowerCAmelCase : Union[str, Any] = num_encoder_blocks
        _lowerCAmelCase : int = sr_ratios
        _lowerCAmelCase : Tuple = depths
        _lowerCAmelCase : List[Any] = hidden_sizes
        _lowerCAmelCase : Tuple = downsampling_rates
        _lowerCAmelCase : Union[str, Any] = num_attention_heads
        _lowerCAmelCase : Union[str, Any] = is_training
        _lowerCAmelCase : Optional[int] = use_labels
        _lowerCAmelCase : Dict = hidden_act
        _lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        _lowerCAmelCase : int = attention_probs_dropout_prob
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : List[str] = num_labels
        _lowerCAmelCase : Tuple = scope
    def snake_case__ ( self):
        '''Create random pixel values (and segmentation labels) plus a config.'''
        _lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _lowerCAmelCase : List[str] = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        _lowerCAmelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def snake_case__ ( self):
        '''Build a SegformerConfig from the stored hyperparameters.'''
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def snake_case__ ( self, __a, __a, __a):
        '''Forward SegformerModel and check the last hidden-state shape.'''
        _lowerCAmelCase : str = SegformerModel(config=__A)
        model.to(__A)
        model.eval()
        _lowerCAmelCase : Optional[Any] = model(__A)
        # The encoder downsamples by the last stage's rate times 2.
        _lowerCAmelCase : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def snake_case__ ( self, __a, __a, __a):
        '''Check semantic-segmentation logits (1/4 resolution) and a positive loss.'''
        _lowerCAmelCase : List[str] = self.num_labels
        _lowerCAmelCase : Dict = SegformerForSemanticSegmentation(__A)
        model.to(__A)
        model.eval()
        _lowerCAmelCase : Optional[Any] = model(__A)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        _lowerCAmelCase : Union[str, Any] = model(__A, labels=__A)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def snake_case__ ( self, __a, __a, __a):
        '''Check the binary (single-label) segmentation loss is positive.'''
        _lowerCAmelCase : int = 1
        _lowerCAmelCase : Dict = SegformerForSemanticSegmentation(config=__A)
        model.to(__A)
        model.eval()
        _lowerCAmelCase : str = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(__A)
        _lowerCAmelCase : List[str] = model(__A, labels=__A)
        self.parent.assertGreater(result.loss, 0.0)
    def snake_case__ ( self):
        '''Return (config, inputs_dict) for the shared model-test harness.'''
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase : Optional[Any] = config_and_inputs
        _lowerCAmelCase : List[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
    """Common test-suite wiring for the Segformer model family.

    NOTE(review): automated renaming damaged this class — both mixin bases
    were mangled to the unbound name `lowerCamelCase__`, every class
    attribute was renamed to `lowerCamelCase__` (so later assignments
    overwrite earlier ones), and method bodies read the unbound, name-mangled
    `__A` plus `config`, `inputs_dict`, `model_class`, `model`, `outputs`,
    `attentions`, `self_attentions`, `hidden_states`, `out_len`, `loss`,
    `arg_names`, `expected_*`. Verify against upstream before trusting.
    """
    # Model classes exercised by the common tests (empty when torch is absent).
    lowerCamelCase__ = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task name -> model class mapping for the pipeline tests.
    lowerCamelCase__ = (
        {
            'feature-extraction': SegformerModel,
            'image-classification': SegformerForImageClassification,
            'image-segmentation': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase__ = True
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    def snake_case__ ( self):
        '''Instantiate the model tester and the config tester.'''
        _lowerCAmelCase : Optional[int] = SegformerModelTester(self)
        _lowerCAmelCase : Optional[Any] = SegformerConfigTester(self, config_class=__A)
    def snake_case__ ( self):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()
    def snake_case__ ( self):
        '''Test the base model forward pass.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A)
    def snake_case__ ( self):
        '''Test binary (single-label) semantic segmentation.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*__A)
    def snake_case__ ( self):
        '''Test multi-label semantic segmentation.'''
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*__A)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def snake_case__ ( self):
        '''Skipped: SegFormer takes pixel values, not input embeddings.'''
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def snake_case__ ( self):
        '''Skipped: embedding accessor methods do not exist for SegFormer.'''
        pass
    def snake_case__ ( self):
        '''Check each model's forward signature starts with `pixel_values`.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : str = model_class(__A)
            _lowerCAmelCase : Optional[Any] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : Any = [*signature.parameters.keys()]
            _lowerCAmelCase : Dict = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __A)
    def snake_case__ ( self):
        '''Check attention outputs: count and per-layer shapes, via kwargs and config.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase : Tuple = True
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Union[str, Any] = True
            _lowerCAmelCase : List[Any] = False
            _lowerCAmelCase : Dict = True
            _lowerCAmelCase : int = model_class(__A)
            model.to(__A)
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : int = model(**self._prepare_for_class(__A, __A))
            _lowerCAmelCase : List[str] = outputs.attentions
            # One attention map per transformer layer across all encoder blocks.
            _lowerCAmelCase : str = sum(self.model_tester.depths)
            self.assertEqual(len(__A), __A)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowerCAmelCase : Optional[Any] = True
            _lowerCAmelCase : List[str] = model_class(__A)
            model.to(__A)
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : List[str] = model(**self._prepare_for_class(__A, __A))
            _lowerCAmelCase : Union[str, Any] = outputs.attentions
            self.assertEqual(len(__A), __A)
            # verify the first attentions (first block, first layer)
            _lowerCAmelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2
            _lowerCAmelCase : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            _lowerCAmelCase : Union[str, Any] = (self.model_tester.image_size // 32) ** 2
            _lowerCAmelCase : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            _lowerCAmelCase : Dict = len(__A)
            # Check attention is always last and order is fine
            _lowerCAmelCase : str = True
            _lowerCAmelCase : List[Any] = True
            _lowerCAmelCase : List[Any] = model_class(__A)
            model.to(__A)
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Any = model(**self._prepare_for_class(__A, __A))
            self.assertEqual(out_len + 1, len(__A))
            _lowerCAmelCase : Optional[int] = outputs.attentions
            self.assertEqual(len(__A), __A)
            # verify the first attentions (first block, first layer)
            _lowerCAmelCase : Any = (self.model_tester.image_size // 4) ** 2
            _lowerCAmelCase : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def snake_case__ ( self):
        '''Check hidden-state outputs: one per encoder block with expected shapes.'''
        def check_hidden_states_output(__a, __a, __a):
            # NOTE(review): duplicate `__a` params here too — renaming damage.
            _lowerCAmelCase : int = model_class(__A)
            model.to(__A)
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Any = model(**self._prepare_for_class(__A, __A))
            _lowerCAmelCase : Tuple = outputs.hidden_states
            _lowerCAmelCase : Dict = self.model_tester.num_encoder_blocks
            self.assertEqual(len(__A), __A)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Optional[Any] = True
            check_hidden_states_output(__A, __A, __A)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCAmelCase : Dict = True
            check_hidden_states_output(__A, __A, __A)
    def snake_case__ ( self):
        '''Check each trainable model can backprop a loss.'''
        if not self.model_tester.is_training:
            return
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase : str = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__A):
                continue
            _lowerCAmelCase : Optional[Any] = model_class(__A)
            model.to(__A)
            model.train()
            _lowerCAmelCase : Tuple = self._prepare_for_class(__A, __A, return_labels=__A)
            _lowerCAmelCase : str = model(**__A).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def snake_case__ ( self):
        '''Skipped pending a smaller common-test model.'''
        pass
    @slow
    def snake_case__ ( self):
        '''Smoke-test loading a pretrained Segformer checkpoint.'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : List[str] = SegformerModel.from_pretrained(__A)
            self.assertIsNotNone(__A)
def A ( ):
    '''Load and return the COCO "two cats" fixture image used by the integration tests.'''
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    # Slow integration tests that run real SegFormer checkpoints end-to-end on the
    # COCO cats fixture image returned by prepare_img().
    # NOTE(review): the bare `__A` names used throughout are never defined in this
    # chunk — in the upstream transformers tests these positions hold keyword
    # booleans / `torch_device` / local results (`expected_shape`, `expected_slice`).
    # Confirm against upstream before relying on this copy.
    @slow
    def snake_case__ ( self):
        '''ADE20k-finetuned B0: checks the logits shape (1, num_labels, 128, 128) and a fixed 3x3x3 logit slice.'''
        _lowerCAmelCase : Optional[int] = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__A, align=__A, do_random_crop=__A)
        _lowerCAmelCase : Tuple = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            __A)
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : List[Any] = image_processor(images=__A, return_tensors="pt")
        _lowerCAmelCase : List[Any] = encoded_inputs.pixel_values.to(__A)
        with torch.no_grad():
            _lowerCAmelCase : Optional[int] = model(__A)
        # logits are produced at 1/4 of the 512x512 input resolution -> 128x128
        _lowerCAmelCase : int = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, __A)
        _lowerCAmelCase : Any = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(__A)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __A, atol=1E-4))
    @slow
    def snake_case__ ( self):
        '''Cityscapes-finetuned B1: same shape check, looser tolerance on the logit slice.'''
        _lowerCAmelCase : Dict = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__A, align=__A, do_random_crop=__A)
        _lowerCAmelCase : List[str] = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(__A)
        _lowerCAmelCase : int = prepare_img()
        _lowerCAmelCase : Optional[int] = image_processor(images=__A, return_tensors="pt")
        _lowerCAmelCase : List[Any] = encoded_inputs.pixel_values.to(__A)
        with torch.no_grad():
            _lowerCAmelCase : str = model(__A)
        _lowerCAmelCase : Any = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, __A)
        _lowerCAmelCase : Any = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ]).to(__A)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __A, atol=1E-1))
    @slow
    def snake_case__ ( self):
        '''post_process_semantic_segmentation: honors explicit target_sizes (500x300) and defaults to the logits' native 128x128.'''
        _lowerCAmelCase : str = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__A, align=__A, do_random_crop=__A)
        _lowerCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            __A)
        _lowerCAmelCase : Any = prepare_img()
        _lowerCAmelCase : List[Any] = image_processor(images=__A, return_tensors="pt")
        _lowerCAmelCase : Dict = encoded_inputs.pixel_values.to(__A)
        with torch.no_grad():
            _lowerCAmelCase : str = model(__A)
        _lowerCAmelCase : Tuple = outputs.logits.detach().cpu()
        # resized to an explicit target size
        _lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=__A, target_sizes=[(500, 300)])
        _lowerCAmelCase : List[Any] = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, __A)
        # default: native logits resolution
        _lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=__A)
        _lowerCAmelCase : List[str] = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, __A)
| 361
|
class UpperCAmelCase_ :
    """Undirected weighted graph backed by a nested adjacency map
    (``adjacency[u][v] == weight``, stored symmetrically).

    Repairs to the mangled copy: ``__init__`` bound locals instead of setting
    ``self.num_vertices`` / ``self.num_edges`` / ``self.adjacency``; every method
    was named ``snake_case__`` (so all but the last were shadowed, and the
    internal ``self.add_vertex`` / ``self.get_edges`` calls failed); the sort key
    in ``distinct_weight`` referenced an undefined name ``e``; ``build``
    referenced an undefined class ``Graph``.
    """

    def __init__( self):
        # counters of distinct vertices/edges seen so far
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] = weight of the undirected edge u-v
        self.adjacency = {}

    def add_vertex( self, vertex):
        """Register `vertex` if it has not been seen yet."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge( self, head, tail, weight):
        """Add the undirected edge head-tail with `weight`; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight( self):
        """Bump duplicate edge weights so that every edge weight is distinct
        (useful for algorithms, e.g. Boruvka, that assume unique weights)."""
        edges = self.get_edges()
        # drop the mirrored duplicate of every undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        # sort by weight (the mangled copy's lambda read the undefined name `e`)
        edges.sort(key=lambda edge_: edge_[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        # write the adjusted weights back symmetrically
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__( self):
        """One `head -> tail == weight` line per stored (directed) entry."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges( self):
        """Return all (tail, head, weight) triples (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices( self):
        """Return a view of all vertices."""
        return self.adjacency.keys()

    @staticmethod
    def build( vertices=None, edges=None):
        """Construct a graph from `vertices` and (head, tail, weight) `edges`."""
        g = UpperCAmelCase_()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UpperCAmelCase_ :
    """Disjoint-set (union-find) with union-by-rank and path compression, plus a
    Boruvka minimum-spanning-tree driver.

    Repairs to the mangled copy: ``__init__`` bound locals instead of setting
    ``self.parent`` / ``self.rank``; ``union`` repeated the same parameter name
    (a SyntaxError); every method was named ``snake_case__`` so the internal
    ``self.find`` / ``self.make_set`` / ``union_find.union`` calls failed; the
    MST driver referenced the undefined ``Graph.UnionFind`` / ``Graph.build``.
    """

    def __init__( self):
        # parent[item] -> representative chain; rank[item] -> union-by-rank height bound
        self.parent = {}
        self.rank = {}

    def __len__( self):
        return len(self.parent)

    def make_set( self, item):
        """Create a singleton set for `item` (idempotent); return its representative."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find( self, item):
        """Return the representative of `item`'s set, compressing paths on the way."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union( self, item1, item2):
        """Merge the sets containing item1 and item2 (union by rank); return the new root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @classmethod
    def boruvka_mst( cls, graph):
        """Compute a minimum spanning tree of `graph` with Boruvka's algorithm.

        `graph` must provide ``num_vertices``, ``get_vertices()``, ``get_edges()``
        (returning (tail, head, weight) triples, each undirected edge twice) and a
        ``build(edges=...)`` constructor — see the Graph class in this file.
        Edge weights are assumed distinct per component choice (see
        ``distinct_weight``). Declared as a classmethod so the union-find is built
        from this class without relying on a module-level name.
        """
        num_components = graph.num_vertices
        union_find = cls()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge per component, -1 = none found yet
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # drop the mirrored duplicate of every undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            # contract: add each component's cheapest edge to the MST
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        # the mangled source called the undefined `Graph.build`; use the input
        # graph's own type so any compatible graph class works
        mst = type(graph).build(edges=mst_edges)
        return mst
| 300
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
# Checkpoint and mixed-precision constants read by the FSDP test classes below.
# The mangled copy bound every constant to the same name `_snake_case`, leaving
# `FPaa`, `BFaa` and `dtypes` (all read later) undefined — a NameError at import.
BERT_BASE_CASED = "bert-base-cased"
_snake_case = BERT_BASE_CASED
FPaa = "fp16"
BFaa = "bf16"
dtypes = [FPaa, BFaa]
_snake_case = dtypes  # keep the final `_snake_case` binding identical to the original
@require_fsdp
@require_cuda
class UpperCAmelCase_ ( a):
    # Unit tests for accelerate's FullyShardedDataParallelPlugin: each test sets
    # ACCELERATE/FSDP environment variables through mockenv_context and checks
    # that the plugin parses them into the right torch FSDP enums/objects.
    # NOTE(review): this is a mangled copy — `_lowerCAmelCase` targets replace what
    # upstream binds to `self.dist_env` / `env[...]` locals, and the bare `__a`
    # names below (loop sources, mockenv kwargs, assertion args) are undefined in
    # this chunk (upstream uses FSDP_SHARDING_STRATEGY, env dicts, etc.). Confirm
    # against upstream accelerate tests before relying on it.
    def snake_case__ ( self):
        '''setUp: single-process FSDP environment shared by every test.'''
        super().setUp()
        _lowerCAmelCase : Tuple = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
    def snake_case__ ( self):
        '''Each FSDP_SHARDING_STRATEGY env value maps to ShardingStrategy(i + 1).'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(__a):
            _lowerCAmelCase : List[str] = self.dist_env.copy()
            _lowerCAmelCase : List[Any] = f"{i + 1}"
            _lowerCAmelCase : List[str] = strategy
            with mockenv_context(**__a):
                _lowerCAmelCase : str = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def snake_case__ ( self):
        '''Backward-prefetch env values map to BackwardPrefetch enums (NO_PREFETCH -> None).'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(__a):
            _lowerCAmelCase : Union[str, Any] = self.dist_env.copy()
            _lowerCAmelCase : Optional[Any] = prefetch_policy
            with mockenv_context(**__a):
                _lowerCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def snake_case__ ( self):
        '''State-dict-type env values map to StateDictType; FULL_STATE_DICT enables CPU offload + rank0-only.'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(__a):
            _lowerCAmelCase : int = self.dist_env.copy()
            _lowerCAmelCase : List[Any] = state_dict_type
            with mockenv_context(**__a):
                _lowerCAmelCase : Optional[int] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)
    def snake_case__ ( self):
        '''set_auto_wrap_policy: per-policy behavior, bad transformer layer class, and size 0.'''
        _lowerCAmelCase : Union[str, Any] = AutoModel.from_pretrained(__a)
        for policy in FSDP_AUTO_WRAP_POLICY:
            _lowerCAmelCase : Union[str, Any] = self.dist_env.copy()
            _lowerCAmelCase : Tuple = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                _lowerCAmelCase : Any = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                _lowerCAmelCase : int = "2000"
            with mockenv_context(**__a):
                _lowerCAmelCase : Dict = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(__a)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        # a transformer layer class that does not exist in a BERT model must raise
        _lowerCAmelCase : Dict = self.dist_env.copy()
        _lowerCAmelCase : int = "TRANSFORMER_BASED_WRAP"
        _lowerCAmelCase : int = "T5Layer"
        with mockenv_context(**__a):
            _lowerCAmelCase : str = FullyShardedDataParallelPlugin()
            with self.assertRaises(__a) as cm:
                fsdp_plugin.set_auto_wrap_policy(__a)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
        # min_num_params=0 disables size-based wrapping
        _lowerCAmelCase : List[str] = self.dist_env.copy()
        _lowerCAmelCase : Dict = "SIZE_BASED_WRAP"
        _lowerCAmelCase : Optional[int] = "0"
        with mockenv_context(**__a):
            _lowerCAmelCase : Dict = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(__a)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def snake_case__ ( self):
        '''Mixed-precision env values produce the matching MixedPrecision policy and scaler.'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            _lowerCAmelCase : Tuple = self.dist_env.copy()
            _lowerCAmelCase : List[str] = mp_dtype
            with mockenv_context(**__a):
                _lowerCAmelCase : Union[str, Any] = Accelerator()
                if mp_dtype == "fp16":
                    _lowerCAmelCase : Dict = torch.floataa
                elif mp_dtype == "bf16":
                    _lowerCAmelCase : Optional[Any] = torch.bfloataa
                _lowerCAmelCase : Tuple = MixedPrecision(param_dtype=__a, reduce_dtype=__a, buffer_dtype=__a)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, __a)
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler, __a))
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(__a)
    def snake_case__ ( self):
        '''fsdp_offload_params env flag maps to CPUOffload(offload_params=flag).'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            _lowerCAmelCase : Union[str, Any] = self.dist_env.copy()
            _lowerCAmelCase : List[Any] = str(__a).lower()
            with mockenv_context(**__a):
                _lowerCAmelCase : Any = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=__a))
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase_ ( a):
    # End-to-end FSDP tests: each method builds an `accelerate launch` command line
    # for a script under scripts/external_deps and runs it in a subprocess.
    # NOTE(review): mangled copy — the `_lowerCAmelCase` targets in setUp replace
    # upstream's `self.performance_lower_bound`, `self.performance_configs`,
    # `self.peak_memory_usage_upper_bound`, `self.n_train`, `self.n_val`,
    # `self.test_scripts_folder`; the `enumerate(__a)` sources are upstream's
    # FSDP_SHARDING_STRATEGY, and `execute_subprocess_async(__a, ...)` receives the
    # assembled `cmd_config`. Confirm against upstream accelerate tests.
    def snake_case__ ( self):
        '''setUp: accuracy/memory thresholds, config names and script locations.'''
        super().setUp()
        _lowerCAmelCase : str = 0.82
        _lowerCAmelCase : Dict = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        _lowerCAmelCase : Union[str, Any] = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        _lowerCAmelCase : Any = 160
        _lowerCAmelCase : int = 160
        _lowerCAmelCase : List[str] = inspect.getfile(accelerate.test_utils)
        _lowerCAmelCase : int = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def snake_case__ ( self):
        '''Run test_performance.py for every performance config, mapping config names to CLI flags.'''
        _lowerCAmelCase : Union[str, Any] = os.path.join(self.test_scripts_folder, "test_performance.py")
        _lowerCAmelCase : Optional[int] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            _lowerCAmelCase : List[Any] = cmd.copy()
            # sharding strategy index is 1-based on the CLI
            for i, strategy in enumerate(__a):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(__a, env=os.environ.copy())
    def snake_case__ ( self):
        '''Run test_checkpointing.py for FULL_SHARD with every state-dict type, then resume from epoch_0.'''
        _lowerCAmelCase : Optional[Any] = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        _lowerCAmelCase : Tuple = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(__a):
            _lowerCAmelCase : List[Any] = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            _lowerCAmelCase : int = len(__a)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # rebuild the command up to (but excluding) any previous state-dict flag
                _lowerCAmelCase : int = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(__a, env=os.environ.copy())
                # second run: resume from the checkpoint written by the first
                _lowerCAmelCase : Optional[int] = cmd_config[:-1]
                _lowerCAmelCase : Tuple = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(__a, env=os.environ.copy())
    def snake_case__ ( self):
        '''Run test_peak_memory_usage.py for every tracked spec with its memory upper bound.'''
        _lowerCAmelCase : Any = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        _lowerCAmelCase : List[str] = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            _lowerCAmelCase : List[str] = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(__a):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(__a, env=os.environ.copy())
| 362
|
_snake_case = 8.3144598
def A ( temperature , molar_mass ):
    '''Return the root-mean-square speed of gas molecules, v_rms = sqrt(3RT/M).

    temperature: absolute temperature in kelvin (must be >= 0).
    molar_mass: molar mass in kg/mol (must be > 0).
    Raises Exception on out-of-range inputs (message preserved from the original).

    The mangled copy declared both parameters as `_lowerCamelCase` (a SyntaxError)
    and read the undefined name `UNIVERSAL_GAS_CONSTANT`.
    '''
    # universal gas constant R in J/(mol*K); defined locally because the
    # module-level constant was bound to `_snake_case`, not this name
    UNIVERSAL_GAS_CONSTANT = 8.3144598
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
_snake_case = 28
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300
| 0
|
def A ( _lowerCamelCase ):
    '''Pancake sort: repeatedly flip the maximum of the unsorted prefix to the
    front, then flip it into its final position. Returns a new ascending list.

    The mangled copy's body read the undefined names `arr`, `cur` and `mi`;
    the parameter name is kept for interface compatibility and aliased below.
    '''
    arr = _lowerCamelCase  # the algorithm below was written against the name `arr`
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # First flip: reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Second flip: reverse the whole unsorted prefix, sinking it to cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)  # module logger
# Map of pretrained checkpoint name -> config URL (NOTE(review): rebinds the
# same mangled module name as the logger above).
_snake_case = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( PretrainedConfig):
    """Configuration for a Wav2Vec2-style model.

    Repairs to the mangled copy: the base class `a` was undefined
    (`PretrainedConfig` is imported at the top of this file); `__init__` declared
    every parameter as `__a` (duplicate argument names — a SyntaxError) while the
    body already read the real parameter names, which are restored below; the
    `_lowerCAmelCase` bindings are restored to the `self.*` attributes the names
    of the right-hand sides imply.
    """

    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # transformer encoder / feature extractor hyper-parameters
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def snake_case__ ( self):
        '''Product of the conv strides, i.e. the input-samples-per-logit ratio
        (named `inputs_to_logits_ratio` upstream; mangled name retained).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin):
    """Pix2Struct-style processor wrapping a Pix2StructImageProcessor and a T5
    tokenizer into a single object.

    Repairs to the mangled copy: the base class `snake_case__` was undefined
    (`ProcessorMixin` is imported at the top of this file); the three class
    attributes all rebound the same name (restored to the names ProcessorMixin
    reads); `__call__` declared every parameter as `__a` (duplicate argument
    names — a SyntaxError); method names restored from their forwarding targets.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """Pix2StructImageProcessor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__( self, image_processor, tokenizer):
        # Pix2Struct's text side never uses token_type_ids
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__( self, images=None, text = None, add_special_tokens = True, padding = False, truncation = None, max_length = None, max_patches = 2048, stride = 0, pad_to_multiple_of = None, return_attention_mask = None, return_overflowing_tokens = False, return_special_tokens_mask = False, return_offsets_mapping = False, return_token_type_ids = False, return_length = False, verbose = True, return_tensors = None, **kwargs, ):
        '''Prepare images and/or text for the model.

        Text-only (non-VQA): behaves exactly like the tokenizer. With images:
        returns patch features, plus the tokenized text renamed to decoder inputs;
        in VQA mode the question is rendered into the image header instead.
        '''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and render the question into the image header
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            # tokenized text feeds the decoder, so rename its keys accordingly
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode( self, *args, **kwargs):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode( self, *args, **kwargs):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names( self):
        '''Deduplicated union of tokenizer and image-processor input names.'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 364
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Probe for a Google Colab environment; default to False if the probe itself
# is unavailable. The menu class below reads `in_colab`, which the mangled copy
# never defined (it bound the result to `_snake_case`), so expose it here too.
_snake_case = False
try:
    _snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
in_colab = _snake_case
@input.register
class UpperCAmelCase_ :
    """Interactive bullet-list menu driven by arrow/number keys (or a plain
    index prompt under Colab).

    Repairs to the mangled copy: ``__init__`` declared both parameters as ``__a``
    (duplicate argument names — a SyntaxError) and bound locals instead of
    instance attributes; the key handlers were all named ``snake_case__``; the
    digit key list-comprehension indexed KEYMAP with the undefined ``__a``.
    """

    def __init__( self, prompt = None, choices = None):
        # cursor row; the mangled copy had a shared mutable default for `choices`,
        # replaced here with the None-sentinel idiom
        self.position = 0
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice( self, index, end = ""):
        """Print choice `index`, colored green on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice( self, index):
        """Render one row, prefixing the arrow when `index` is the cursor row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction( self, direction, num_spaces = 1):
        """Move the cursor `num_spaces` rows in `direction`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up( self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down( self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select( self):
        """Confirm the current row: park the terminal cursor and return the index."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt( self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row( self):
        """Jump directly to the row matching the pressed digit key (if in range)."""
        # NOTE(review): `self.current_selection` is assumed to be set by the
        # @input.register dispatch machinery — confirm in the input module.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run( self, default_choice = 0):
        """Draw the menu and block until a choice is made; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    # handle_input is attached by the @input.register machinery
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # erase the rendered menu before echoing the final choice
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 300
| 0
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCAmelCase_ ( SchedulerMixin, ConfigMixin):
    """Fourth-order "improved PNDM" (iPNDM) diffusion scheduler; see
    https://arxiv.org/pdf/2202.09778.pdf, formulas (9), (12), (13) and Algorithm 2.

    Repairs to the mangled copy: it inherited twice from the undefined ``a__``
    (`SchedulerMixin`/`ConfigMixin` are imported at the top of this file);
    ``step`` and ``_get_prev_sample`` repeated the parameter name ``__a`` (a
    SyntaxError); ``torch.atana`` is not a torch function (``torch.atan2`` is);
    method names restored from the internal ``self.set_timesteps`` /
    ``self._get_prev_sample`` calls.
    """

    # each step consumes one model output
    order = 1

    @register_to_config
    def __init__( self, num_train_timesteps = 1000, trained_betas = None):
        # populates betas/alphas/timesteps
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values (history of predicted noise residuals)
        self.ets = []

    def set_timesteps( self, num_inference_steps, device = None):
        """Precompute betas/alphas/timesteps for `num_inference_steps` and reset the history."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        # alphas/betas lie on the unit circle: alpha^2 + beta^2 = 1
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step( self, model_output, timestep, sample, return_dict = True, ):
        """One reverse step: linear multistep over the last (up to 4) noise predictions."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # Adams-Bashforth-style coefficients, bootstrapping with lower orders
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input( self, sample, *args, **kwargs):
        """iPNDM does not rescale model inputs; `sample` is returned unchanged."""
        return sample

    def _get_prev_sample( self, sample, timestep_index, prev_timestep_index, ets):
        """x_prev = alpha_prev * (x - sigma * eps) / alpha + sigma_prev * eps."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        # guard against division by a vanishing alpha at the end of the schedule
        pred = (sample - sigma * ets) / max(alpha, 1E-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self):
        return self.config.num_train_timesteps
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule names to the public names they export.
# (The mangled original reassigned a single variable repeatedly and then passed
# an undefined `_import_structure` to `_LazyModule`; entries were being lost.)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded the same way as above.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy dependencies
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 0
|
def check_bouncy(n):
    """Return True if *n* is a bouncy number.

    A bouncy number's digits are neither monotonically increasing nor
    monotonically decreasing (Project Euler 112).

    Args:
        n (int): the number to test.

    Raises:
        ValueError: if *n* is not an integer.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    # Bouncy iff the digits match neither the ascending nor descending ordering.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent=99):
    """Return the least number at which the proportion of bouncy numbers reaches *percent*.

    Args:
        percent (float): target percentage, strictly between 0 and 100.

    Raises:
        ValueError: if *percent* is outside the open interval (0, 100).
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        # Compare the running bouncy proportion against the requested threshold.
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    # Run the module doctests, then print the answer for the default 99% threshold.
    from doctest import testmod
    testmod()
    print(f'''{solution(99)}''')
| 366
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_:
    """A simple dense matrix over Python numbers.

    Supports element access via `m[r, c]`, addition, negation, subtraction,
    scalar and matrix multiplication, transposition, and the Sherman-Morrison
    rank-one inverse update.
    """

    def __init__(self, row, column, default_value=0):
        """Create a *row* x *column* matrix with every entry set to *default_value*."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest rendered element, used to right-align every cell.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        """Return True iff *loc* is a (row, col) pair inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, UpperCAmelCase_)
        assert self.row == another.row and self.column == another.column
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCAmelCase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCAmelCase_):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCAmelCase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = UpperCAmelCase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1), where `self` is A^(-1) and u, v are column vectors.

        Returns None when the update is not invertible (1 + v^T A^(-1) u == 0).
        """
        assert isinstance(u, UpperCAmelCase_) and isinstance(v, UpperCAmelCase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke test: Sherman-Morrison update against a 3x3 identity inverse."""
        # a^(-1): start from the identity matrix.
        ainv = UpperCAmelCase_(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v column vectors
        u = UpperCAmelCase_(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = UpperCAmelCase_(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 300
| 0
|
from __future__ import annotations
from math import pi
def A(inductance, frequency, reactance):
    """Solve the inductive reactance relation X_L = 2*pi*f*L for the zero argument.

    Exactly one of the three arguments must be 0; that quantity is computed from
    the other two and returned as a single-entry dict keyed by its name.

    Args:
        inductance (float): inductance L in henries (0 to solve for it).
        frequency (float): frequency f in hertz (0 to solve for it).
        reactance (float): inductive reactance X_L in ohms (0 to solve for it).

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run any doctests when executed as a script.
    import doctest
    doctest.testmod()
| 367
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pandas (pickled DataFrame) packaged loader.

    Attributes:
        features: optional `datasets.Features` the loaded tables are cast to.
    """

    # Read by the builder as self.config.features; the mangled original named
    # this attribute incorrectly, so the cast was never applied.
    features: Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that reads pickled pandas DataFrames into Arrow tables."""

    # Framework hook: the config class this builder is parameterized with.
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata; the features come from the builder config."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict forms of `data_files` and build the splits."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, table) pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 300
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public names it exports.
# (The mangled original overwrote one variable per branch and then passed an
# undefined `_import_structure` to `_LazyModule`.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded the same way as above.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy tokenizer
    # dependencies are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
    # NOTE(review): both base classes are the bare (undefined) name `a` —
    # presumably TFModelTesterMixin and PipelineTesterMixin from the imports
    # above; confirm against the original transformers test module.

    # All TF MobileBert model classes exercised by the common model tests.
    lowerCamelCase__ = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    lowerCamelCase__ = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the four class attributes above/below all rebind the same
    # mangled name `lowerCamelCase__`; the original attribute names (e.g.
    # all_model_classes / pipeline_model_mapping / test flags) appear lost.
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def snake_case__ ( self, __a, __a, __a=False):
        # NOTE(review): all three parameters are named `__a` (a SyntaxError);
        # the body reads `return_labels`, `model_class`, `inputs_dict`, so the
        # original signature was presumably (inputs_dict, model_class,
        # return_labels=False) — confirm before running.
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
        if return_labels:
            if model_class in get_values(__a):
                # NOTE(review): the tf.zeros result is assigned to a throwaway
                # local; the original presumably stored it into inputs_dict
                # (e.g. the "next_sentence_label" entry). `tf.intaa` also looks
                # like a mangled tf.int32.
                _lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
        return inputs_dict
class UpperCAmelCase_ ( a):
    # Helper that builds small MobileBert configs/inputs and runs per-head checks.
    # NOTE(review): the base class `a` is undefined, every `def snake_case__`
    # below shadows the previous one, parameters are uniformly named `__a`
    # (duplicate argument names are a SyntaxError), and results are assigned to
    # a throwaway local `_lowerCAmelCase` where `self.<attr>` / real locals were
    # clearly intended — this block appears machine-mangled; the intended names
    # can be read off the right-hand sides.
    def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
        '''Store the tester's hyperparameters (targets mangled; see class note).'''
        _lowerCAmelCase : List[Any] = parent
        _lowerCAmelCase : Dict = batch_size
        _lowerCAmelCase : str = seq_length
        _lowerCAmelCase : int = is_training
        _lowerCAmelCase : List[Any] = use_input_mask
        _lowerCAmelCase : Optional[Any] = use_token_type_ids
        _lowerCAmelCase : Union[str, Any] = use_labels
        _lowerCAmelCase : int = vocab_size
        _lowerCAmelCase : int = hidden_size
        _lowerCAmelCase : Optional[int] = num_hidden_layers
        _lowerCAmelCase : Tuple = num_attention_heads
        _lowerCAmelCase : Dict = intermediate_size
        _lowerCAmelCase : Tuple = hidden_act
        _lowerCAmelCase : Any = hidden_dropout_prob
        _lowerCAmelCase : Any = attention_probs_dropout_prob
        _lowerCAmelCase : List[Any] = max_position_embeddings
        _lowerCAmelCase : Any = type_vocab_size
        _lowerCAmelCase : List[Any] = type_sequence_label_size
        _lowerCAmelCase : Union[str, Any] = initializer_range
        _lowerCAmelCase : List[str] = num_labels
        _lowerCAmelCase : List[Any] = num_choices
        _lowerCAmelCase : str = scope
        _lowerCAmelCase : Union[str, Any] = embedding_size
    def snake_case__ ( self):
        '''Build random ids/masks/labels plus a small MobileBertConfig.'''
        _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        _lowerCAmelCase : str = None
        if self.use_input_mask:
            _lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
        _lowerCAmelCase : List[str] = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        _lowerCAmelCase : Optional[Any] = None
        _lowerCAmelCase : Optional[Any] = None
        _lowerCAmelCase : Optional[int] = None
        if self.use_labels:
            _lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
            _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            _lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
        _lowerCAmelCase : Optional[Any] = MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check base-model output shapes for dict and list input forms.'''
        _lowerCAmelCase : str = TFMobileBertModel(config=__a)
        _lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : Any = model(__a)
        _lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
        _lowerCAmelCase : List[Any] = model(__a)
        _lowerCAmelCase : Any = model(__a)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check masked-LM logits shape.'''
        _lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
        _lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : List[Any] = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check next-sentence-prediction logits shape.'''
        _lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
        _lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : List[str] = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check pretraining heads' output shapes.'''
        _lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
        _lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : Optional[Any] = model(__a)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check sequence-classification logits shape.'''
        _lowerCAmelCase : Dict = self.num_labels
        _lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
        _lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : Optional[Any] = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check multiple-choice logits shape (inputs tiled across choices).'''
        _lowerCAmelCase : Optional[Any] = self.num_choices
        _lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
        _lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
        _lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
        _lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
        _lowerCAmelCase : Optional[Any] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        _lowerCAmelCase : List[str] = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check token-classification logits shape.'''
        _lowerCAmelCase : List[str] = self.num_labels
        _lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
        _lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : Union[str, Any] = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
        '''Check question-answering start/end logits shapes.'''
        _lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
        _lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        _lowerCAmelCase : Union[str, Any] = model(__a)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def snake_case__ ( self):
        '''Repack prepare_config_and_inputs() into (config, inputs_dict).'''
        _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        # NOTE(review): a tuple unpacking target with an annotation is invalid
        # syntax; the original unpacked the 7-tuple into named variables.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def snake_case__ ( self):
        '''setUp: build the model tester and the ConfigTester.'''
        # NOTE(review): references a nested TFMobileBertModelTester on an outer
        # TFMobileBertModelTest class — the original nesting appears flattened.
        _lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
        _lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
    def snake_case__ ( self):
        '''Run the common config tests.'''
        self.config_tester.run_common_tests()
    def snake_case__ ( self):
        '''Exercise the base model check.'''
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__a)
    def snake_case__ ( self):
        '''Exercise the masked-LM check.'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
    def snake_case__ ( self):
        '''Exercise the multiple-choice check.'''
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
    def snake_case__ ( self):
        '''Exercise the next-sentence-prediction check.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
    def snake_case__ ( self):
        '''Exercise the pretraining check.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
    def snake_case__ ( self):
        '''Exercise the question-answering check.'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
    def snake_case__ ( self):
        '''Exercise the sequence-classification check.'''
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
    def snake_case__ ( self):
        '''Exercise the token-classification check.'''
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
    @slow
    def snake_case__ ( self):
        '''Slow test: load the pretrained checkpoint from the hub.'''
        for model_name in ["google/mobilebert-uncased"]:
            _lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
            self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration test checking TFMobileBertForPreTraining logits against reference values."""

    @slow
    def snake_case__ ( self):
        """Forward a fixed 6-token input and compare shape and a 3x3 logits slice."""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from a known-good run of this checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_snake_case = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
    """Configuration for ALBERT models.

    Defaults correspond to the values visible in the mangled original
    (vocab 30000, hidden 4096, 12 layers, 64 heads, gelu_new, etc.).
    """

    # Read by the AutoConfig machinery; the original attribute name was mangled.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        """Store all hyperparameters; token ids are forwarded to the base config."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCAmelCase_ ( OnnxConfig):
    """ONNX export configuration for ALBERT: declares the dynamic input axes."""

    @property
    def inputs(self):
        """Mapping of model input names to their dynamic-axis labels.

        Multiple-choice inputs carry an extra `choice` axis between batch and
        sequence; all other tasks use (batch, sequence).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 369
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class UpperCAmelCase_ ( PretrainedConfig):
    """Composite configuration holding a question-encoder config and a generator config."""

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        """Build the composite config; `kwargs` MUST contain `question_encoder` and `generator` dicts."""
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported locally to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS id when not set explicitly.
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate from separate question-encoder and generator config objects."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 300
| 0
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF format (weights, config, vocab files).

    Fixes over the obfuscated original: duplicate parameter names (SyntaxError),
    undefined ``__SCREAMING_SNAKE_CASE`` references, dict writes that were discarded,
    and the "Save vocab file" message printing the config path instead of the vocab path.
    """
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository.
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    # Drop tensor-valued entries from the params dict so it is JSON-serializable.
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    # BPE continuation markers: "@@" becomes "</w>"-terminated word pieces.
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # The conversion function in this file is named `A`; the original called an undefined name.
    A(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 370
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_(ABC):
    """Abstract base class for CLI commands.

    The original inherited from the undefined name ``a``; `ABC` is already imported at
    the top of this file and matches the `@abstractmethod` usage below.
    NOTE(review): both methods carry the same (obfuscated) name, so the second definition
    shadows the first in the class namespace; names kept to preserve the external interface.
    """

    @staticmethod
    @abstractmethod
    def snake_case__(parser):
        """Register this command on the given argument parser (must be overridden)."""
        raise NotImplementedError()

    @abstractmethod
    def snake_case__(self):
        """Execute the command (must be overridden)."""
        raise NotImplementedError()
| 300
| 0
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A(ksize, sigma, theta, lambd, gamma, psi):
    """Build a (ksize x ksize) Gabor filter kernel.

    Args:
        ksize: kernel size; bumped to the next odd number if even.
        sigma: gaussian standard deviation.
        theta: orientation in degrees.
        lambd: wavelength of the sinusoidal factor.
        gamma: spatial aspect ratio.
        psi: phase offset in radians.

    Returns:
        numpy.ndarray of shape (ksize, ksize), dtype float64.

    Fixes over the obfuscated original: duplicate parameter names (SyntaxError),
    invalid ``np.floataa`` dtype, and per-cell kernel writes that were discarded.
    """
    # Kernel size has to be odd so it has a center pixel.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # Fill each cell of the kernel.
    for y in range(ksize):
        for x in range(ksize):
            # Distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # Degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # Rotate coordinates into the filter frame.
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope times sinusoidal carrier.
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read original image.
    img = imread("../image_data/lena.jpg")
    # Turn image into gray scale values.
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges (the kernel builder here is named `A`;
    # the original called the undefined name `gabor_filter_kernel`).
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = A(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel)
    # Normalize to 0-255 and cast (np.uinta does not exist; np.uint8 intended).
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 371
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A(config, base_model=False):
    """Build the (timm key, HF key) rename pairs for a ViT checkpoint.

    Args:
        config: model config providing ``num_hidden_layers``.
        base_model: if True, target a bare base model — keys lose the "vit." prefix and
            layernorm/pooler are mapped at the top level instead of a classifier head.

    Returns:
        list of (old_key, new_key) tuples.

    Fixes over the obfuscated original: duplicate parameter names (SyntaxError) and the
    ``rename_keys`` accumulator never being bound.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def A(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value tensors.

    Mutates ``state_dict`` in place. Fixes over the obfuscated original: duplicate
    parameter names (SyntaxError) and the per-layer writes being discarded.
    """
    for i in range(config.num_hidden_layers):
        # Bare base models have no "vit." prefix on HF keys.
        prefix = "" if base_model else "vit."
        # Read in weights + bias of input projection layer (in timm, a single matrix + bias).
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        hidden = config.hidden_size
        # Next, add query, keys and values (in that order) to the state dict.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            hidden : hidden * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[hidden : hidden * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden:, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden:]
def A(state_dict):
    """Drop classification-head weights from ``state_dict`` in place (ignore if absent).

    The original popped the mangled parameter name itself; the intent (per the
    ``ignore_keys`` list) is to pop each listed key with a None default.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def A(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Original had triplicated parameter names (SyntaxError) and discarded the write.
    """
    val = dct.pop(old)
    dct[new] = val
def A():
    """Download and return the standard COCO sanity-check image (two cats on a couch).

    The original referenced ``_lowerCamelCase`` although the function takes no
    arguments; the URL local and ``stream=True`` are the evident intent.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A(vit_name, pytorch_dump_folder_path):
    """Convert a timm ViT/DeiT checkpoint to a HF ViT model and save it.

    Fixes over the obfuscated original: every config/model assignment was discarded
    into ``_lowerCAmelCase``; locals reconstructed to the names the expressions
    already referenced. Helper call names are kept exactly as in the original body.
    """
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass  # base size: ViTConfig defaults already match
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # The conversion function in this file is named `A`; the original called an undefined name.
    A(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for the swinv2 sub-package. The original never defined
# `_import_structure` (referenced at the bottom), had a dangling `else`, and its
# TYPE_CHECKING imports ("swinva"/"Swinva*") contradicted the string lists declared
# here; names below follow the declared lists.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility.

    The original inherited from the undefined name ``a``; per the warning text the
    class is a shim over `VideoMAEImageProcessor` (imported above). The warning's
    category argument was the undefined ``__a``; `FutureWarning` is the standard
    category for these deprecation shims.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
| 0
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase_(unittest.TestCase):
    @slow
    def snake_case__(self):
        """Regression-check mT5-small's cross-entropy score on a tiny example.

        The obfuscated original discarded every local and then referenced the
        undefined ``mtf_score``/``EXPECTED_SCORE``; locals reconstructed so the
        final assertion is actually computed.
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Score convention: negative total token loss over the label sequence.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 351
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Nightly GPU test for the legacy ONNX Stable Diffusion inpaint pipeline.

    The obfuscated original gave both properties the same name (the second shadowed
    the first) while the test referenced ``self.gpu_provider``/``self.gpu_options``;
    the properties are renamed to match those references.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA execution-provider spec used by the pipeline.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        # Session options; memory-pattern optimization disabled for determinism.
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def snake_case__(self):
        """End-to-end inpainting run compared against a stored reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 300
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase_(TestCase):
    """Tests for `FeaturesManager.determine_framework`.

    Fixes over the obfuscated original: inherited from the undefined name ``a``
    (should be `TestCase`, imported above); setup/helper methods were renamed
    while the bodies already referenced `self._setup_pt_ckpt`/`self._setup_tf_ckpt`
    and the attributes set in `setUp`; three test methods shared one name (only
    the last survived); `MagicMock(return_value=__a)` referenced an undefined name.
    """

    def setUp(self):
        # Small shared checkpoint and the framework identifiers under test.
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        # Save a PyTorch checkpoint of the small model at model_path.
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        # Save a TensorFlow checkpoint (converted from PT) at model_path.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        """An explicitly provided framework is returned as-is."""
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        """Without an explicit framework, local checkpoints decide it."""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        """With no local checkpoint, the installed frameworks decide it."""
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
        self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Tests for `CLIPSegProcessor` (tokenizer + image processor pairing).

    Fixes over the obfuscated original: inherited from the undefined name ``a``
    (should be `unittest.TestCase`); all fixture attributes were discarded while
    methods referenced `self.tmpdirname` etc.; seven methods shared one name (only
    the last survived — test names reconstructed); `np.uinta` is not a dtype;
    `pytest.raises(__a)` referenced an undefined name (ValueError intended for a
    processor called with no input).
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 300
| 0
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase_(TrainingArguments):
    """Training arguments for seq2seq trainers, extending `TrainingArguments`.

    Fixes over the obfuscated original: the base class was the undefined name ``a``
    (the decorator and imports establish `TrainingArguments`); all five fields shared
    one name (field names reconstructed from their metadata help strings);
    `field(default=a)` referenced an undefined name; the serializer discarded its
    locals and referenced the undefined ``d``.
    """

    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def to_dict(self):
        """Serialize, expanding any `GenerationConfig` value to a plain dict for JSON support."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_(unittest.TestCase):
    """Tests for the `zero-shot-classification` pipeline (NLI-style sequence classifiers).

    NOTE(review): the obfuscated original bound every attribute/method to a single
    name (`lowerCamelCase__` / `snake_case__`) — so only the last method survived —
    and referenced undefined names (`model_mapping`, `_TO_SKIP`, `outputs`, `__a`).
    Distinct, conventional names are restored here so the pipeline-test harness
    (`get_test_pipeline` / `run_pipeline_test`) and unittest discovery (`test_*`)
    can actually find them; the skip set is the module constant `_snake_case`.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {
            config: model for config, model in model_mapping.items() if config.__name__ not in _snake_case
        }
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _snake_case
        }

    # The pipeline echoes its input back as `sequence`, so a single constant serves
    # as both the input and the expected `sequence` in the slow multi-label tests
    # (the original duplicated this text four times with different line wrapping).
    _ARTICLE = (
        "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
        " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
        " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
        " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
        " machine translation tasks show these models to be superior in quality while being more parallelizable"
        " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
        " English-to-German translation task, improving over the existing best results, including ensembles by"
        " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
        " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
        " fraction of the training costs of the best models from the literature. We show that the Transformer"
        " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
        " large and limited training data."
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        # ("polics" typo reproduced from upstream: labels only need to be strings.)
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        # NOTE(review): exception types below were lost in obfuscation and were
        # reconstructed from the upstream test — confirm against transformers'
        # tests/pipelines/test_pipelines_zero_shot.py.
        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        # A `None` template fails when the pipeline tries to `.format` it.
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )
        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """Check `entailment_id` resolution for several label2id layouts, then restore."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        # Generic labels: no entailment label -> sentinel -1.
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        # Restore the original mapping so later tests see an unmodified config.
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            self._ARTICLE,
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": self._ARTICLE,
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            self._ARTICLE,
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": self._ARTICLE,
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
| 300
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Flags selecting which of the three independent fix-ups below to run.
# NOTE(review): the obfuscated original bound every one of these (and every
# variable below) to the single name `_snake_case`, leaving `do_only_config`,
# `args`, `config`, etc. undefined at their use sites. Names are restored from
# the use sites; the flag order matches the upstream diffusers script — confirm.
do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    # Old config key -> new config key.
    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }
    # Old state-dict top-level prefix -> new submodule name.
    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    # A config.json at the repo root means a bare UNet repo; otherwise it lives
    # under the "unet" subfolder of a full pipeline repo.
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
    config = json.loads(text)

    if do_only_config:
        # Drop the legacy keys so the model is instantiated from defaults for them.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                # Move the value to its new key, dropping the legacy one.
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            # Legacy downsampler ".op" params are dropped entirely.
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    # Rewrite the top-level prefix, keeping the rest of the path.
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 354
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the SDE-VE scheduler's prediction step.

    NOTE(review): the obfuscated original named this class `UpperCAmelCase_`
    (immediately shadowed by the scheduler class below) while the scheduler
    returns `SdeVeOutput(prev_sample=..., prev_sample_mean=...)` — the name and
    both fields are restored from that call site. The base was the undefined
    name `a`; `BaseOutput` is the imported candidate.
    """

    # Denoised sample for the previous timestep (x_{t-1}).
    prev_sample: torch.FloatTensor
    # Same sample before the diffusion noise term is added.
    prev_sample_mean: torch.FloatTensor
class UpperCAmelCase_(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (after score_sde_pytorch).

    NOTE(review): the obfuscated original had duplicate `__a` parameters
    (SyntaxErrors), undefined bases `a, a`, and had lost every assignment target
    (`self.timesteps`, `sigma`, `drift`, ...). Names are reconstructed from the
    surviving expressions and diffusers' `ScoreSdeVeScheduler` — confirm upstream.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1_348.0, sampling_eps=1e-5,
                 correct_steps=1):
        # Standard deviation of the initial noise distribution.
        self.init_noise_sigma = sigma_max
        # Setable values (filled by set_timesteps/set_sigmas).
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        """No-op: this scheduler requires no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        """Set the continuous timestep grid, from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        """Set the noise scales used during the diffusion chain."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        # Geometric interpolation between sigma_min and sigma_max over the timesteps
        # (the second assignment mirrors the original, which also computed it twice).
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Return sigma at the previous discrete index; zero at the chain start."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        """Predictor step: propagate the sample with the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        """Corrector step: Langevin-style correction based on the model score."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Forward-diffuse `original_samples` to the given discrete `timesteps`."""
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 300
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_(DiffusionPipeline):
    """Unconditional audio generation pipeline (UNet + scheduler).

    NOTE(review): reconstructed from an obfuscated original with duplicate `__a`
    parameters (SyntaxErrors) and lost assignment targets; names follow diffusers'
    `DanceDiffusionPipeline` — confirm upstream. The base was the undefined name
    `a`; `DiffusionPipeline` is the imported candidate.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=100, generator=None, audio_length_in_s=None,
                 return_dict=True):
        """Generate `batch_size` audio clips of roughly `audio_length_in_s` seconds."""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the resolution once per up-block, so the sample length
        # must be a multiple of this factor (and not degenerately small).
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}.")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple; the extra tail is trimmed after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process.")
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added to satisfy the downsampling constraint.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 355
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password of *length* characters drawn from letters, digits and punctuation.

    Uses `secrets.choice` (CSPRNG), so the result is suitable for security use.
    Fixes the obfuscated original, where every function was named `A` and had
    duplicate parameter names (a SyntaxError).
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return a shuffled password of total length *i* guaranteed to contain *chars_incl*.

    The remaining length is split roughly evenly between letters, digits and
    punctuation (the remainder goes to the letters share).
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return *i* characters chosen from *chars_incl* with a CSPRNG."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    """Placeholder for a digits-only generator (names restored from the
    commented-out call in alternative_password_generator; not implemented)."""
    pass  # Put your code here...


def random_letters(chars_incl, i):
    """Placeholder for a letters-only generator (not implemented)."""
    pass  # Put your code here...


def random_characters(chars_incl, i):
    """Placeholder for a punctuation-only generator (not implemented)."""
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Return True if *password* is at least *min_length* long and mixes
    uppercase, lowercase, digits and punctuation.

    Parameter names are restored from the body (`min_length`, `password` survived
    obfuscation); the original `def A(_lowerCamelCase, _lowerCamelCase=8)` had
    duplicate parameters, a SyntaxError.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    """Interactive driver: prompt for a length and required characters, then
    print passwords from both generators.

    Restored from `def A(...)`: the __main__ guard calls `main()`, which was
    otherwise undefined.
    """
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length), )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 300
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , a , a):
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = 4
lowerCamelCase__ = (
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'DownBlock2D',
)
lowerCamelCase__ = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D')
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1280, 1280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = False
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCAmelCase : str = jnp.zeros(__a, dtype=jnp.floataa)
_lowerCAmelCase : Optional[int] = jnp.ones((1,), dtype=jnp.intaa)
_lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
_lowerCAmelCase : int = jax.random.split(__a)
_lowerCAmelCase : Tuple = {"params": params_rng, "dropout": dropout_rng}
return self.init(__a, __a, __a, __a)["params"]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.block_out_channels
_lowerCAmelCase : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim
# input
_lowerCAmelCase : int = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_lowerCAmelCase : Optional[int] = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
_lowerCAmelCase : Dict = FlaxTimestepEmbedding(__a, dtype=self.dtype)
_lowerCAmelCase : List[Any] = self.only_cross_attention
if isinstance(__a, __a):
_lowerCAmelCase : List[Any] = (only_cross_attention,) * len(self.down_block_types)
if isinstance(__a, __a):
_lowerCAmelCase : Any = (num_attention_heads,) * len(self.down_block_types)
# down
_lowerCAmelCase : str = []
_lowerCAmelCase : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
_lowerCAmelCase : Union[str, Any] = output_channel
_lowerCAmelCase : Tuple = block_out_channels[i]
_lowerCAmelCase : Dict = i == len(__a) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCAmelCase : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
_lowerCAmelCase : List[Any] = FlaxDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(__a)
_lowerCAmelCase : Any = down_blocks
# mid
_lowerCAmelCase : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
# up
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[int] = list(reversed(__a))
_lowerCAmelCase : str = list(reversed(__a))
_lowerCAmelCase : Optional[Any] = list(reversed(__a))
_lowerCAmelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
_lowerCAmelCase : Dict = output_channel
_lowerCAmelCase : Dict = reversed_block_out_channels[i]
_lowerCAmelCase : Union[str, Any] = reversed_block_out_channels[min(i + 1, len(__a) - 1)]
_lowerCAmelCase : Union[str, Any] = i == len(__a) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowerCAmelCase : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=__a, out_channels=__a, prev_output_channel=__a, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
_lowerCAmelCase : Optional[int] = FlaxUpBlockaD(
in_channels=__a, out_channels=__a, prev_output_channel=__a, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
up_blocks.append(__a)
_lowerCAmelCase : Optional[int] = output_channel
_lowerCAmelCase : Union[str, Any] = up_blocks
# out
_lowerCAmelCase : List[str] = nn.GroupNorm(num_groups=32, epsilon=1E-5)
_lowerCAmelCase : Optional[int] = nn.Conv(
self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__(
    self,
    sample,
    timesteps,
    encoder_hidden_states,
    down_block_additional_residuals=None,
    mid_block_additional_residual=None,
    return_dict: bool = True,
    train: bool = False,
):
    """UNet forward pass.

    Args:
        sample: noisy input batch in NCHW layout.
        timesteps: diffusion timestep(s); scalar or 1-D array.
        encoder_hidden_states: text-encoder states for cross-attention.
        down_block_additional_residuals: optional ControlNet-style residuals
            added to the down-block skip connections.
        mid_block_additional_residual: optional residual added after the mid block.
        return_dict: if False, return a plain ``(sample,)`` tuple.
        train: enables dropout in the sub-blocks (``deterministic=not train``).
    """
    # 1. time: accept python scalars and 0-d arrays as well as 1-D arrays
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
        timesteps = timesteps.astype(dtype=jnp.float32)
        timesteps = jnp.expand_dims(timesteps, 0)
    t_emb = self.time_proj(timesteps)
    t_emb = self.time_embedding(t_emb)

    # 2. pre-process: NCHW -> NHWC, Flax convolutions are channels-last
    sample = jnp.transpose(sample, (0, 2, 3, 1))
    sample = self.conv_in(sample)

    # 3. down
    down_block_res_samples = (sample,)
    for down_block in self.down_blocks:
        # cross-attention down blocks additionally consume the encoder states
        if isinstance(down_block, FlaxCrossAttnDownBlockaD):
            sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        else:
            sample, res_samples = down_block(sample, t_emb, deterministic=not train)
        down_block_res_samples += res_samples

    if down_block_additional_residuals is not None:
        # add ControlNet residuals onto the stored skip connections
        new_down_block_res_samples = ()
        for down_block_res_sample, down_block_additional_residual in zip(
            down_block_res_samples, down_block_additional_residuals
        ):
            down_block_res_sample += down_block_additional_residual
            new_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = new_down_block_res_samples

    # 4. mid
    sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
    if mid_block_additional_residual is not None:
        sample += mid_block_additional_residual

    # 5. up: each up block consumes layers_per_block + 1 skip connections
    for up_block in self.up_blocks:
        res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
        down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
        if isinstance(up_block, FlaxCrossAttnUpBlockaD):
            sample = up_block(
                sample,
                temb=t_emb,
                encoder_hidden_states=encoder_hidden_states,
                res_hidden_states_tuple=res_samples,
                deterministic=not train,
            )
        else:
            sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

    # 6. post-process, back to NCHW
    sample = self.conv_norm_out(sample)
    sample = nn.silu(sample)
    sample = self.conv_out(sample)
    sample = jnp.transpose(sample, (0, 3, 1, 2))

    if not return_dict:
        return (sample,)
    return FlaxUNetaDConditionOutput(sample=sample)
| 356
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps each submodule to the public names it exports.
# (The mangled original dropped every result into the same throwaway name, so
# `_import_structure` referenced below was never defined.)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends (torch/tf) are
    # only imported when one of their exported names is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

# File name expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Canonical download locations of the SentencePiece model per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

# SentencePiece's marker for a word-initial piece (U+2581).
SPIECE_UNDERLINE = "▁"
class UpperCAmelCase_(PreTrainedTokenizer):
    """T5 tokenizer backed by SentencePiece, with `<extra_id_N>` sentinel
    tokens appended after the SentencePiece vocabulary.

    The mangled original had duplicate parameter names (a SyntaxError) and
    every method named identically; names below are restored from the
    bodies' own variable references.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Warn about (and keep) the historical 512 max length for known T5
        checkpoints unless the caller pinned an explicit value."""
        if pretrained_model_name_or_path in UpperCAmelCase_.max_model_input_sizes:
            deprecated_max_model_length = UpperCAmelCase_.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        # SentencePiece pieces plus the appended sentinel ids.
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return 1 for special-token positions, 0 elsewhere (EOS is appended
        after each sequence)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Append EOS unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # T5 does not use token type ids; everything is segment 0.
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Format: ``X </s>`` or ``A </s> B </s>`` for sequence pairs."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        if not self.legacy:
            # Normalize leading whitespace handling before SentencePiece.
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            # Strip the artificial leading underline added in tokenize().
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Map a token string to an id; sentinels live above the SP vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode pieces back to text, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 357
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# Physical constants used by the Casimir equation below.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def A(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the idealized Casimir equation F = (ℏ c π² A) / (240 d⁴) for the
    one quantity passed as 0.

    Exactly one of `force`, `area`, `distance` must be 0; it is computed from
    the other two and returned as a single-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is
            negative.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        # The mangled original computed the value into a throwaway local and
        # returned the incoming 0; assign back to the solved variable.
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 300
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds BlipText configs and dummy inputs for the TF model tests.

    Renamed from the mangled ``UpperCAmelCase_`` because ``setUp`` below
    instantiates ``BlipTextModelTester``; parameter names restored from the
    attribute assignments in the original body.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with a random mask that
        attends to a prefix of each row."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                # Attend to positions before start_index, mask the rest.
                # (The mangled source dropped the subscript targets here.)
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without a mask and check output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_(TFModelTesterMixin, unittest.TestCase):
    """Test suite for the TF BLIP text model (config checks + forward pass)."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE(review): the mangled source collapsed three distinct False flags
    # into one name; restored per the upstream BLIP TF test — confirm.
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # Missing keys are expected between the PT and TF checkpoints.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 358
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
# Native AMP (torch.cuda.amp.autocast) only exists from PyTorch 1.6 onward.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

# Module-level logger; the mangled source assigned this to a throwaway name
# even though `logger` is used below.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments for which wav2vec2 model/config to pretrain.

    Field names restored from `main()`'s `model_args.*` accesses; defaults
    marked NOTE were lost in the mangling and restored from convention.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): original default was garbled; True matches the upstream
    # pretraining script — confirm.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    """Configure root logging and set this module's logger level.

    DEBUG when `--verbose_logging` is on, INFO on the main process,
    WARNING otherwise. (Renamed from the mangled `A`: `main()` calls
    `configure_logger`, and duplicate parameter names were a SyntaxError.)
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        # Only the main process logs at INFO to avoid duplicated output.
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments for what data to pretrain on.

    Field names restored from `main()`'s `data_args.*` accesses.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    # NOTE(review): default was garbled in the source; False is the
    # conventional value — confirm.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Dynamically pads received audio features and samples the masked time
    indices needed for wav2vec2 pretraining.

    Renamed from the mangled ``UpperCAmelCase_``: ``main()`` instantiates
    ``DataCollatorForWavaVecaPretraining``.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            # (the mangled source dropped the indexed write target here)
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        # NOTE(review): `attention_mask` is unbound if batch["attention_mask"]
        # is None — upstream relies on it always being present; confirm.
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass that decays the model's Gumbel-softmax temperature
    after every update step.

    Renamed from the mangled ``UpperCAmelCase_``: ``main()`` instantiates
    ``WavaVecaPreTrainer``; the step override must be named ``training_step``
    for the Trainer API to pick it up.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """Run one forward/backward pass, then decay the Gumbel temperature.

        Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            # model is wrapped (DataParallel/DeepSpeed): reduce per-device losses
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    """Entry point: parse args, build/normalize the dataset, and run
    wav2vec2 pretraining.

    (Renamed from the mangled `A`: the __main__ guard calls `main()`;
    local names restored from the references in the original body.)
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain:
        # carve a validation split off the head of the train split
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 300
| 0
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] in place if they violate
    `direction` (1 = ascending, 0 = descending).

    The mangled original assigned the swapped pair to a throwaway local,
    losing the tuple-assignment swap, and used duplicate parameter names.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively turn the bitonic run array[low:low+length] into a sorted
    run in the given direction (length must be a power of two)."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """In-place bitonic sort of array[low:low+length] (power-of-two length):
    sort the halves in opposite directions, then merge the bitonic result."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Read a comma-separated list of integers; length should be a power of
    # two for bitonic sort to be correct.
    # (The mangled original assigned both values to `_snake_case`, leaving
    # `user_input` and `unsorted` undefined.)
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 359
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the first Amazon.in search-results page for *product*.

    Returns a DataFrame with one row per result: title, link, current price,
    rating, MRP and percentage discount. Results missing the expected markup
    (sponsored/placeholder cards) are skipped.

    Fixes the mangled original: the function was named ``A`` (the ``__main__``
    block calls ``get_amazon_product_data``) and every scraped field was bound
    to a throwaway local, so the returned frame was always empty.
    NOTE(review): the file-level `from bsa import BeautifulSoup` looks like a
    typo for `bs4` — verify before running.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    # Desktop user agent so Amazon serves the full results page.
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Walk result cards (paired with rating rows, hence zip_longest).
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            # assumes each card exposes its title inside an <h2>; the mangled
            # original read `item.ha` — TODO confirm against live markup.
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                # MRP missing or unparsable -> discount unknown.
                discount = float("nan")
            # Append the row (the mangled original bound the row to a local,
            # so nothing was ever stored in the frame).
            data_frame.loc[len(data_frame.index)] = [
                product_title,
                product_link,
                product_price,
                product_rating,
                product_mrp,
                discount,
            ]
        except AttributeError:
            # Card without the expected structure: skip it.
            pass
    return data_frame
if __name__ == "__main__":
    # Fixes the mangled original, which assigned to `_snake_case` but read
    # the never-defined name `product`.
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 300
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 360
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch ``BertModel``'s weights to a TF1 checkpoint.

    Maps each PyTorch state-dict entry to the canonical TF BERT variable name,
    transposing weight matrices that TF stores transposed, and saves a
    ``<model_name>.ckpt`` under *ckpt_dir*. Uses the TF1.x session/graph API.

    Fixes the mangled original: the function was named ``A`` (``main`` calls
    ``convert_pytorch_checkpoint_to_tf``) and its three parameters shared one
    name (a SyntaxError). Parameter roles reconstructed from the body.
    """
    # Matrices stored transposed on the TF side relative to PyTorch.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered (pattern, replacement) pairs mapping PT names to TF names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rename rule in order, then prefix with the model scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Round-trip check: the TF variable should equal the source tensor.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF1.

    *raw_args* lets tests pass an argv list; ``None`` reads ``sys.argv``.
    Fixes the mangled original, which was named ``A`` while the guard calls
    ``main()``, and whose argparse ``type``/``required`` values were destroyed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 300
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCAmelCase_ ( a):
    """Configuration class for a UniSpeech-style speech encoder.

    Holds the transformer sizes, the convolutional feature-extractor shape,
    SpecAugment masking settings, quantizer/pretraining parameters and the
    CTC loss options.

    NOTE(review): the mangled original named every ``__init__`` parameter
    ``__a`` — duplicate parameter names are a SyntaxError — and bound every
    value to a throwaway local instead of ``self``. Parameter names below were
    reconstructed from the attribute assignments and positional defaults.
    """

    lowerCamelCase__ = 'unispeech'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def snake_case__ ( self):
        '''Total stride of the conv feature extractor (input samples per output frame).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 361
|
class UpperCAmelCase_ :
    """Undirected weighted graph backed by a dict-of-dicts adjacency map.

    NOTE(review): this block looks machine-mangled Borůvka-MST code. Method
    bodies reference names (``vertex``, ``head``, ``tail``, ``weight``) that
    are not the declared parameter ``__a``; every public method shares the
    name ``snake_case__`` (only the last definition survives on the class);
    assignments that should target ``self``/subscripts were rewritten into
    throwaway ``_lowerCAmelCase`` locals; and one signature repeats ``__a``,
    which is a SyntaxError. As written the class cannot work — the comments
    below record the apparent intent without changing any code.
    """

    def __init__( self):
        '''Initialise an empty graph.'''
        # NOTE(review): presumably self.num_vertices = 0, self.num_edges = 0
        # and self.adjacency = {} — as written the values are discarded.
        _lowerCAmelCase : Dict = 0
        _lowerCAmelCase : Optional[int] = 0
        _lowerCAmelCase : Tuple = {}

    def snake_case__ ( self, __a):
        '''add_vertex: register a vertex in the adjacency map if new.'''
        # NOTE(review): body reads `vertex` but the parameter is `__a`.
        if vertex not in self.adjacency:
            _lowerCAmelCase : List[Any] = {}  # presumably self.adjacency[vertex] = {}
            self.num_vertices += 1

    def snake_case__ ( self, __a, __a, __a):
        '''add_edge: add an undirected head↔tail edge with the given weight.'''
        # NOTE(review): duplicate parameter name `__a` is a SyntaxError.
        self.add_vertex(__a)
        self.add_vertex(__a)
        if head == tail:
            return
        # presumably self.adjacency[head][tail] = weight and the mirror entry;
        # as written nothing is stored.
        _lowerCAmelCase : Dict = weight
        _lowerCAmelCase : Dict = weight

    def snake_case__ ( self):
        '''distinct_weight: bump equal weights so all edge weights differ.'''
        _lowerCAmelCase : Any = self.get_edges()
        for edge in edges:
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
            # drop the mirrored (tail, head) copy of each undirected edge
            edges.remove((tail, head, weight))
        for i in range(len(__a)):
            _lowerCAmelCase : Optional[int] = list(edges[i])
        # NOTE(review): `e` is undefined — original was likely `lambda e: e[2]`.
        edges.sort(key=lambda __a: e[2])
        for i in range(len(__a) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                _lowerCAmelCase : Tuple = edges[i][2] + 1  # presumably edges[i + 1][2] = ...
        for edge in edges:
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
            _lowerCAmelCase : Union[str, Any] = weight
            _lowerCAmelCase : Optional[int] = weight

    def __str__( self):
        '''Render the graph as one "head -> tail == weight" line per edge.'''
        _lowerCAmelCase : Optional[int] = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                _lowerCAmelCase : List[Any] = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def snake_case__ ( self):
        '''get_edges: return all edges as (tail, head, weight) tuples.'''
        _lowerCAmelCase : int = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def snake_case__ ( self):
        '''get_vertices: return a view of the vertex set.'''
        return self.adjacency.keys()

    @staticmethod
    def snake_case__ ( __a=None, __a=None):
        '''build: construct a graph from vertex and edge lists.'''
        # NOTE(review): duplicate `__a` is a SyntaxError, and `Graph` is not
        # defined at module level — the original was presumably a static
        # factory on a class named Graph.
        _lowerCAmelCase : Optional[Any] = Graph()
        if vertices is None:
            _lowerCAmelCase : Any = []
        if edges is None:
            _lowerCAmelCase : Any = []
        for vertex in vertices:
            g.add_vertex(__a)
        for edge in edges:
            g.add_edge(*__a)
        return g
class UpperCAmelCase_ :
    """Union–find (disjoint-set) with path compression and union by rank,
    plus a static Borůvka minimum-spanning-tree driver.

    NOTE(review): mangled block — it rebinds ``UpperCAmelCase_`` (shadowing
    the graph class defined above); methods share the name ``snake_case__``;
    bodies reference names (``item``, ``roota``, ``graph``) that are not the
    declared parameter ``__a``; and the MST routine uses the undefined
    ``Graph.UnionFind`` / ``Graph.build``. Comments record apparent intent
    without changing any code.
    """

    def __init__( self):
        '''Create an empty structure (parent and rank maps).'''
        _lowerCAmelCase : Dict = {}      # presumably self.parent = {}
        _lowerCAmelCase : List[Any] = {} # presumably self.rank = {}

    def __len__( self):
        '''Number of items currently tracked.'''
        return len(self.parent)

    def snake_case__ ( self, __a):
        '''make_set: register a new singleton item and return it.'''
        if item in self.parent:
            return self.find(__a)
        _lowerCAmelCase : Optional[int] = item  # presumably self.parent[item] = item
        _lowerCAmelCase : Any = 0               # presumably self.rank[item] = 0
        return item

    def snake_case__ ( self, __a):
        '''find: return the set representative, compressing the path.'''
        if item not in self.parent:
            return self.make_set(__a)
        if item != self.parent[item]:
            # presumably self.parent[item] = self.find(...) (path compression)
            _lowerCAmelCase : Any = self.find(self.parent[item])
        return self.parent[item]

    def snake_case__ ( self, __a, __a):
        '''union: merge the two items' sets by rank; return the new root.'''
        # NOTE(review): duplicate parameter `__a` is a SyntaxError, and
        # `roota` stands in for what were two distinct roots — every
        # comparison below is a self-comparison as written.
        _lowerCAmelCase : List[Any] = self.find(__a)
        _lowerCAmelCase : List[str] = self.find(__a)
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            _lowerCAmelCase : Any = roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            _lowerCAmelCase : List[Any] = roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            _lowerCAmelCase : int = roota
            return roota
        return None

    @staticmethod
    def snake_case__ ( __a):
        '''boruvka: repeatedly add each component's cheapest outgoing edge.'''
        _lowerCAmelCase : Tuple = graph.num_vertices       # components remaining
        _lowerCAmelCase : Optional[int] = Graph.UnionFind()  # NOTE(review): Graph undefined
        _lowerCAmelCase : str = []                         # accumulated MST edges
        while num_components > 1:
            _lowerCAmelCase : List[str] = {}               # cheapest edge per component
            for vertex in graph.get_vertices():
                _lowerCAmelCase : Optional[Any] = -1       # -1 == no candidate yet
            _lowerCAmelCase : Union[str, Any] = graph.get_edges()
            for edge in edges:
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
                # drop the mirrored copy of each undirected edge
                edges.remove((tail, head, weight))
            for edge in edges:
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
                _lowerCAmelCase : Dict = union_find.find(__a)
                _lowerCAmelCase : Optional[Any] = union_find.find(__a)
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Union[str, Any] = [head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Tuple = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
                    if union_find.find(__a) != union_find.find(__a):
                        union_find.union(__a, __a)
                        mst_edges.append(cheap_edge[vertex])
            _lowerCAmelCase : Any = num_components - 1
        _lowerCAmelCase : List[str] = Graph.build(edges=__a)  # NOTE(review): Graph undefined
        return mst
| 300
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
    """Configuration class for an LXMERT-style cross-modal transformer.

    Holds the text-encoder sizes, the pretraining-task toggles, the visual
    feature dimensions and the per-modality layer counts.

    NOTE(review): the mangled original named every ``__init__`` parameter
    ``__a`` — duplicate parameter names are a SyntaxError — and bound every
    value to a throwaway local. Parameter names were reconstructed from the
    attribute assignments and positional defaults. The two identically named
    class attributes are kept as-is (the second shadows the first, as in the
    original).
    """

    lowerCamelCase__ = 'lxmert'
    lowerCamelCase__ = {}

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-modality encoder depths, keyed the way downstream code expects.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 362
|
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed v_rms = sqrt(3*R*T/M).

    *temperature* is in kelvin and *molar_mass* in kg/mol.
    Raises ValueError (a subclass of the originally raised Exception, so
    existing handlers still work) for a negative temperature or a
    non-positive molar mass.

    Fixes the mangled original: the constant was bound to ``_snake_case`` and
    the function was named ``A``, so both lookups below failed at call time.
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example — fixes the mangled original, which assigned the demo values to
    # `_snake_case` but then read the never-defined names `temperature`,
    # `molar_mass` and `vrms`.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 300
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
    """Deprecated feature-extractor shim: warns and defers to the base class.

    Fixes the mangled original signature ``(*__a, **__a)``, which repeats the
    parameter name and is therefore a SyntaxError, and restores a warning
    category in place of the destroyed second ``warn`` argument.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): FutureWarning is the category the upstream
        # transformers deprecation shims use — TODO confirm.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_ ( a):
    """Configuration class for a Wav2Vec2-style speech encoder.

    Holds the transformer sizes, the convolutional feature-extractor shape,
    SpecAugment masking, quantizer/pretraining parameters, CTC options, the
    optional output adapter, and the XVector (TDNN) head settings.

    NOTE(review): the mangled original named every ``__init__`` parameter
    ``__a`` — duplicate parameter names are a SyntaxError — and bound every
    value to a throwaway local instead of ``self``. Parameter names were
    reconstructed from the attribute assignments and positional defaults.
    """

    lowerCamelCase__ = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def snake_case__ ( self):
        '''Total stride of the conv feature extractor (input samples per output frame).'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 0
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the number of ways to
    fill a row with red blocks of length >= *min_block_length* (separated by
    at least one black square) first exceeds one million.

    Fixes the mangled original: the function was named ``A`` with duplicate
    parameter names (a SyntaxError) while the guard calls ``solution()``.
    """
    # fill_count_functions[k] = number of fillings of a row of length k;
    # rows shorter than the minimum block admit only the empty filling.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # the all-black row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # Place a block of block_length at block_start, then count the
                # fillings of the remainder after the mandatory gap.
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1  # block flush against the right edge
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 364
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
    """Interactive terminal bullet menu: arrow/digit keys move the highlight,
    enter confirms, Ctrl-C aborts.

    NOTE(review): mangled block — every key handler is named ``snake_case__``
    (later definitions shadow earlier ones on the class, though each function
    object is still registered via its ``input.mark`` decorator), and bodies
    call real method names (``write_choice``, ``print_choice``,
    ``move_direction``, ``handle_input``) that no longer exist on the class.
    Results are also bound to throwaway ``_lowerCAmelCase`` locals where the
    original wrote to ``self``. Comments record apparent intent only.
    """

    def __init__( self, __a = None, __a = []):
        '''Store prompt/choices and pick the platform's highlight glyph.'''
        # NOTE(review): duplicate parameter `__a` is a SyntaxError, the `[]`
        # default is a shared mutable, and `choices`/`prompt` below are not
        # the declared parameter names.
        _lowerCAmelCase : Optional[int] = 0        # presumably self.position
        _lowerCAmelCase : Optional[int] = choices  # presumably self.choices
        _lowerCAmelCase : Tuple = prompt           # presumably self.prompt
        if sys.platform == "win32":
            _lowerCAmelCase : Optional[Any] = "*"  # presumably self.arrow_char
        else:
            _lowerCAmelCase : Dict = "➔ "

    def snake_case__ ( self, __a, __a = ""):
        '''write_choice: print the choice text at *index*, green on POSIX.'''
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, __a)
        else:
            forceWrite(self.choices[index], __a)

    def snake_case__ ( self, __a):
        '''print_choice: render one menu row, arrow-prefixed if selected.'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(__a)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def snake_case__ ( self, __a, __a = 1):
        '''move_direction: move the highlight up/down by num_spaces rows.'''
        _lowerCAmelCase : Union[str, Any] = self.position  # previous row to redraw
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return  # already on the last row
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return  # already on the first row
            self.position -= num_spaces
        clear_line()
        self.print_choice(__a)
        move_cursor(__a, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def snake_case__ ( self):
        '''Arrow-up handler.'''
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def snake_case__ ( self):
        '''Arrow-down handler.'''
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def snake_case__ ( self):
        '''Enter handler: move below the menu and return the selected index.'''
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def snake_case__ ( self):
        '''Ctrl-C handler: restore cursor position, then propagate.'''
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(__a)] for number in range(10)])
    def snake_case__ ( self):
        '''Digit handler: jump straight to the typed row index.'''
        # NOTE(review): the decorator's comprehension reads `__a` but binds
        # `number` — original was almost certainly `KEYMAP[str(number)]`.
        _lowerCAmelCase : str = int(chr(self.current_selection))  # presumably index
        _lowerCAmelCase : List[str] = index - self.position        # presumably movement
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, __a)
            else:
                return
        else:
            return

    def snake_case__ ( self, __a = 0):
        '''run: draw the menu and block until the user confirms a choice.'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        _lowerCAmelCase : List[Any] = default_choice  # presumably self.position
        for i in range(len(self.choices)):
            self.print_choice(__a)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab has no raw keyboard input; fall back to typed index.
                    try:
                        _lowerCAmelCase : str = int(builtins.input())
                    except ValueError:
                        _lowerCAmelCase : List[Any] = default_choice
                else:
                    _lowerCAmelCase : List[str] = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before returning.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(__a, "\n")
                    return choice
| 300
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCAmelCase_ :
    """Helper that builds a tiny EsmConfig plus random TF inputs for the
    TF-ESM test suite.

    NOTE(review): mangled test harness — the helper methods are all named
    ``snake_case__`` (later definitions shadow earlier ones) while other
    methods call the real names (``prepare_config_and_inputs`` etc.), and
    results are bound to throwaway ``_lowerCAmelCase`` locals instead of
    ``self`` attributes or tuples. Comments record apparent intent only.
    """

    def __init__( self, __a, ):
        '''Store the parent test case and small-model hyper-parameters.'''
        # NOTE(review): body reads `parent`, but the parameter is `__a`; the
        # constants below presumably populated attributes such as batch_size
        # (13), seq_length (7), vocab_size (99), hidden_size (32), etc.
        _lowerCAmelCase : Optional[int] = parent
        _lowerCAmelCase : List[Any] = 13
        _lowerCAmelCase : List[str] = 7
        _lowerCAmelCase : Any = True
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : Optional[int] = True
        _lowerCAmelCase : str = 99
        _lowerCAmelCase : Union[str, Any] = 32
        _lowerCAmelCase : List[str] = 2
        _lowerCAmelCase : Tuple = 4
        _lowerCAmelCase : Dict = 37
        _lowerCAmelCase : List[str] = "gelu"
        _lowerCAmelCase : int = 0.1
        _lowerCAmelCase : Tuple = 0.1
        _lowerCAmelCase : Optional[Any] = 512
        _lowerCAmelCase : Dict = 16
        _lowerCAmelCase : Tuple = 2
        _lowerCAmelCase : Tuple = 0.02
        _lowerCAmelCase : Optional[Any] = 3
        _lowerCAmelCase : List[Any] = 4
        _lowerCAmelCase : str = None

    def snake_case__ ( self):
        '''prepare_config_and_inputs: random ids/masks/labels plus an EsmConfig.'''
        _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        _lowerCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            _lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
        _lowerCAmelCase : int = None
        _lowerCAmelCase : str = None
        _lowerCAmelCase : List[str] = None
        if self.use_labels:
            _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
            _lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            _lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.num_choices)
        _lowerCAmelCase : Optional[Any] = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case__ ( self):
        '''prepare_config_and_inputs_for_decoder: add encoder states/mask.'''
        # NOTE(review): a parenthesized single-name annotated target like the
        # original multi-value unpack destroyed here is itself invalid syntax.
        (
            _lowerCAmelCase
        ) : str = self.prepare_config_and_inputs()
        _lowerCAmelCase : List[str] = True
        _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        _lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
        '''create_and_check_model: run TFEsmModel three input styles, check shape.'''
        # NOTE(review): duplicate parameter name `__a` is a SyntaxError.
        _lowerCAmelCase : Tuple = TFEsmModel(config=__a)
        _lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
        _lowerCAmelCase : Optional[Any] = model(__a)
        _lowerCAmelCase : Any = [input_ids, input_mask]
        _lowerCAmelCase : List[Any] = model(__a)
        _lowerCAmelCase : Optional[Any] = model(__a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, ):
        '''create_and_check_model_as_decoder: exercise cross-attention inputs.'''
        _lowerCAmelCase : List[str] = True
        _lowerCAmelCase : Optional[int] = TFEsmModel(config=__a)
        _lowerCAmelCase : List[str] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        _lowerCAmelCase : Optional[int] = model(__a)
        _lowerCAmelCase : List[str] = [input_ids, input_mask]
        _lowerCAmelCase : Any = model(__a, encoder_hidden_states=__a)
        # Also check the case where encoder outputs are not passed
        _lowerCAmelCase : List[str] = model(__a, attention_mask=__a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
        '''create_and_check_for_masked_lm: logits shape over the vocab.'''
        _lowerCAmelCase : Optional[int] = TFEsmForMaskedLM(config=__a)
        _lowerCAmelCase : Any = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
        '''create_and_check_for_token_classification: logits per label.'''
        _lowerCAmelCase : Optional[int] = self.num_labels
        _lowerCAmelCase : Any = TFEsmForTokenClassification(config=__a)
        _lowerCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
        _lowerCAmelCase : Tuple = model(__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def snake_case__ ( self):
        '''prepare_config_and_inputs_for_common: (config, inputs_dict) pair.'''
        _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        (
            _lowerCAmelCase
        ) : Optional[Any] = config_and_inputs
        _lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
    """Model-level unittest suite for the TF-ESM family.

    NOTE(review): mangled — the base classes are the undefined name ``a``
    listed twice (presumably TFModelTesterMixin and PipelineTesterMixin),
    every test method is named ``snake_case__`` (later definitions shadow
    earlier ones, so unittest would discover almost none of them), and
    ``setUp`` logic references ``TFEsmModelTester``, which does not exist
    under that name in this mangled module.
    """

    # presumably all_model_classes
    lowerCamelCase__ = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # presumably pipeline_model_mapping
    lowerCamelCase__ = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # presumably test_head_masking / test_onnx flags
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def snake_case__ ( self):
        '''setUp: build the model tester and a ConfigTester.'''
        _lowerCAmelCase : str = TFEsmModelTester(self)
        _lowerCAmelCase : Tuple = ConfigTester(self, config_class=__a, hidden_size=37)

    def snake_case__ ( self):
        '''test_config: run the shared configuration checks.'''
        self.config_tester.run_common_tests()

    def snake_case__ ( self):
        '''test_model: forward pass shape checks.'''
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    def snake_case__ ( self):
        '''test_model_as_decoder: cross-attention forward pass.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__a)

    def snake_case__ ( self):
        '''test_for_masked_lm.'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a)

    def snake_case__ ( self):
        '''test_for_token_classification.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a)

    @slow
    def snake_case__ ( self):
        '''test_model_from_pretrained: load the first published checkpoint.'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Union[str, Any] = TFEsmModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    @unittest.skip("Protein models do not support embedding resizing.")
    def snake_case__ ( self):
        '''Skipped: embedding resizing unsupported for protein models.'''
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def snake_case__ ( self):
        '''Skipped: embedding resizing unsupported for protein models.'''
        pass

    def snake_case__ ( self):
        '''test_model_common_attributes: embeddings/bias surface checks.'''
        _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : List[str] = model_class(__a)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                _lowerCAmelCase : Union[str, Any] = model.get_bias()
                assert isinstance(__a, __a)
                for k, v in name.items():
                    assert isinstance(__a, tf.Variable)
            else:
                _lowerCAmelCase : str = model.get_output_embeddings()
                assert x is None
                _lowerCAmelCase : Tuple = model.get_bias()
                assert name is None
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration tests running the real `facebook/esm2_t6_8M_UR50D`
    checkpoint and comparing a logits/hidden-state slice against reference
    values.

    NOTE(review): local names (`model`, `input_ids`, `output`,
    `expected_slice`) and the `test_*` method names were restored — the
    original assigned everything to throwaway identifiers and then
    referenced the real names, which were undefined.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921_518, -10.589_814, -6.4_671_307],
                    [-6.3_967_156, -13.911_377, -1.1_211_915],
                    [-7.781_247, -13.951_557, -3.740_592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14_443_092, 0.54_125_327, 0.3_247_739],
                    [0.30_340_484, 0.00_526_676, 0.31_077_722],
                    [0.32_278_043, -0.24_987_096, 0.3_414_628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: submodule name -> list of public attributes it defines.
# NOTE(review): the original assigned every list to `_snake_case` and then
# passed an undefined `_import_structure` to `_LazyModule`; the standard
# transformers init pattern is restored here.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 0
|
from PIL import Image
def A ( _lowerCamelCase ):
    """Binarize a grayscale PIL image in place around its mean pixel value.

    Pixels strictly greater than the mean become 255, all others 0. Returns
    the same image object.

    NOTE(review): the original assigned `image.size` (a tuple) to a single
    throwaway name and then used undefined `width`/`height`/`pixels`; the
    unpacking follows the upstream algorithm. PIL's `.size` is (width,
    height) while this unpacks (height, width) — harmless for square images,
    confirm for non-square inputs.
    """
    height, width = _lowerCamelCase.size
    mean = 0
    pixels = _lowerCamelCase.load()
    # First pass: accumulate the mean.
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold in place.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return _lowerCamelCase


# Public name used by the __main__ guard (the original called an undefined
# `mean_threshold`).
mean_threshold = A

if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 366
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
    """A small dense matrix: indexing, +, -, * (scalar and matrix),
    transpose, and the Sherman-Morrison rank-1 inverse update.

    NOTE(review): duplicate `__a` parameters (a SyntaxError) were replaced,
    and the method names `validate_indicies`/`transpose`/`sherman_morrison`
    were restored because the class body and the __main__ demo call them by
    those names.
    """

    def __init__(self, row, column, default_value = 0):
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier: width of the widest element so columns align.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        """Return True iff `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, UpperCAmelCase_)
        assert self.row == another.row and self.column == another.column
        # Add
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCAmelCase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCAmelCase_):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCAmelCase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = UpperCAmelCase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^-1 where `self` holds A^-1, or None if singular."""
        assert isinstance(u, UpperCAmelCase_) and isinstance(v, UpperCAmelCase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def A():
        """Demo: apply the Sherman-Morrison update to the 3x3 identity."""
        ainv = UpperCAmelCase_(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = UpperCAmelCase_(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = UpperCAmelCase_(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    # Name the original code called but never defined.
    testa = A

    import doctest

    doctest.testmod()
    testa()
| 300
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_snake_case = logging.getLogger(__name__)
# The trainer class below logs through `logger`, which the original never
# bound; expose the module logger under that name too.
logger = _snake_case
class UpperCAmelCase_ ( a):
    """PyTorch-Lightning module fine-tuning a transformer on a GLUE task.

    NOTE(review): duplicate `__a` parameters (SyntaxError) and throwaway
    assignments were repaired; method/attribute names restored to the
    pl_examples GLUE conventions that the surrounding code relies on
    (`self.mode` at construction, `self._eval_end` in the epoch hooks,
    `add_model_specific_args` in `main`).
    """

    # Read via `self.mode` in __init__ and passed to BaseTransformer.
    mode = "sequence-classification"

    def __init__(self, hparams):
        """`hparams` may be an argparse Namespace or a plain dict."""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only these architectures consume token_type_ids.
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Cache GLUE features for the train and dev splits."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle = False):
        """Load cached features for `mode` and wrap them in a DataLoader."""
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate per-step outputs into loss + GLUE metrics."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
def A ( ):
    """CLI entry point: parse args, train the GLUE module, optionally predict.

    NOTE(review): the original built the parser into a throwaway name and then
    used undefined `parser`/`args`; also the lightning module class in this
    file is bound to `UpperCAmelCase_`, so it is referenced by that name.
    """
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = UpperCAmelCase_.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}", )
        os.makedirs(args.output_dir)
    model = UpperCAmelCase_(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


# Public name used by the __main__ guard (the original called an undefined
# `main`).
main = A

if __name__ == "__main__":
    main()
| 367
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas loader.

    NOTE(review): the builder below reads `self.config.features`, so the
    field must be named `features`; the original stored it under a
    collapsed identifier.
    """

    features: Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that yields Arrow tables from pickled pandas
    DataFrames.

    NOTE(review): the original referenced an undefined `PandasConfig` and
    undefined locals (`data_files`, `files`, `pa_table`); the config class
    defined directly above is still bound to `UpperCAmelCase_` when this
    class body executes, so it is used here. Hook names (`_info`,
    `_split_generators`, `_cast_table`, `_generate_tables`) restored to the
    `datasets` builder API — `_cast_table` is already called by name below.
    """

    BUILDER_CONFIG_CLASS = UpperCAmelCase_

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict forms of `data_files`."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 300
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
# The conversion helpers below log through `logger`, which the original
# never bound.
logger = _snake_case
# Decoder keys that are legitimately absent after conversion; read by the
# state-dict sanity check as `EXPECTED_MISSING_KEYS`.
_snake_case = ["model.decoder.embed_positions.weights"]
EXPECTED_MISSING_KEYS = _snake_case
def A ( _lowerCamelCase ):
    """Map a fairseq MusicGen decoder state-dict key to its HF equivalent.

    NOTE(review): the original computed each `.replace(...)` into a throwaway
    variable and returned the untouched input; the renamed key is now
    threaded through every substitution.
    """
    name = _lowerCamelCase
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


# Name used by the state-dict conversion below (the original called an
# undefined `rename_keys`).
rename_keys = A
def A ( state_dict , hidden_size ):
    """Rename all decoder keys, split the fused qkv projection, and peel the
    enc_to_dec_proj weights into their own dict.

    Returns (state_dict, enc_dec_proj_state_dict).

    NOTE(review): the original signature repeated `_lowerCamelCase` twice (a
    SyntaxError); names restored from the keyword call
    `rename_state_dict(..., hidden_size=...)` below. The enc_to_dec_proj key
    prefix stripping follows the upstream converter — confirm.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


# Name used by the conversion entry point below.
rename_state_dict = A
def A ( checkpoint ):
    """Return a MusicgenDecoderConfig matching a named fairseq checkpoint size.

    Raises ValueError for anything other than 'small', 'medium' or 'large'.
    NOTE(review): the param is named `checkpoint` because the original error
    f-string already interpolates `{checkpoint}`.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config


# Name used by the conversion entry point below.
decoder_config_from_checkpoint = A
@torch.no_grad()
def A ( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    """Convert a fairseq MusicGen checkpoint to MusicgenForConditionalGeneration,
    sanity-check a forward pass, then save and/or push model + processor.

    NOTE(review): the original repeated `_lowerCamelCase` four times in the
    signature (a SyntaxError) and discarded every local; names restored from
    the f-strings/usages in the body (`checkpoint`, `pytorch_dump_folder`,
    `repo_id`, `device`).
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )


# Name used by the CLI entry point below.
convert_musicgen_checkpoint = A
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    # NOTE(review): --device is parsed but, as in the original, not forwarded
    # to the converter — confirm whether it should be passed as `device=`.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 368
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
    """Common + pipeline tests for the TF MobileBERT models.

    NOTE(review): class-attribute names restored so the tester mixins (which
    read `all_model_classes`, `pipeline_model_mapping`, `test_*` flags) see
    them; the original collapsed them all onto one identifier.
    """

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add a dummy next-sentence label for pretraining-head models.

        NOTE(review): the original signature repeated `__a` (a SyntaxError)
        and dropped the label on the floor; `tf.intaa` is not a real dtype —
        int32 matches the integer label tensors used elsewhere in this suite.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test pinning MLM logits of the released checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        # reference values captured from a known-good run of the checkpoint
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Back-compat transform for jiwer < 2.3.0: flatten sentences to characters."""

        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # insert the delimiter between (not after) sentences
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# Citation / description / usage docstrings consumed by the CER metric class below.
# Restored names: the class decorator and MetricInfo reference _CITATION,
# _DESCRIPTION and _KWARGS_DESCRIPTION, which the obfuscated `_snake_case`
# rebindings never defined.
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> cer = datasets.load_metric(\"cer\")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character error rate metric backed by jiwer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # the transforms turn sentences into character lists, so jiwer's
            # "wer" over characters *is* the CER
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 369
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)  # NOTE(review): _snake_case holds the RAG config docstring assigned just above; consider renaming it to RAG_CONFIG_DOC
class RagConfig(PretrainedConfig):
    """Composite configuration wrapping a question-encoder config and a generator config."""

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        # local import avoids a circular dependency with the auto-config module
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            # fall back to the generator's setting when not given explicitly
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Build a RagConfig from already-constructed sub-configs."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 300
| 0
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Simulate a quantum full adder of input_1 + input_2 + carry_in on 4 qubits
    and return the measurement counts of the (sum, carry_out) qubits.

    Each input may be 0, 1 or 2; the value 2 puts the qubit in superposition
    via a Hadamard gate.  Fixes the obfuscated original, whose duplicate
    parameter names and degenerate `isinstance(x, x)` checks made it unrunnable.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 370
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract interface every CLI sub-command implements.

    Restored from the obfuscated original, whose undefined base class `a` and
    duplicate `snake_case__` method names (the second def shadowed the first)
    broke the abstract contract.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's argument parser to the root parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 300
| 0
|
import datasets
# Citation / description / usage docstrings for the XNLI metric class below.
# Restored names: the decorator and MetricInfo reference _CITATION, _DESCRIPTION
# and _KWARGS_DESCRIPTION, which the obfuscated `_snake_case` rebindings never defined.
_CITATION = "\\n@InProceedings{conneau2018xnli,\n  author = \"Conneau, Alexis\n                 and Rinott, Ruty\n                 and Lample, Guillaume\n                 and Williams, Adina\n                 and Bowman, Samuel R.\n                 and Schwenk, Holger\n                 and Stoyanov, Veselin\",\n  title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n  booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n               in Natural Language Processing\",\n  year = \"2018\",\n  publisher = \"Association for Computational Linguistics\",\n  location = \"Brussels, Belgium\",\n}\n"
_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    'accuracy': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric(\"xnli\")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    """Return the fraction of positions where `preds` equals `labels`.

    Expects numpy arrays (elementwise `==` then `.mean()`).  Name restored to
    match the `simple_accuracy(...)` call in the metric class below.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    """XNLI metric: plain accuracy over predicted vs. reference labels."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # sts-b is regression, everything else is integer labels
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 371
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping a timm ViT state dict to HF ViT.

    When `base_model` is True the `vit.` prefix is stripped so keys match the
    headless ViTModel.  Name and the `rename_keys` local restored from the
    obfuscated original, which appended to an undefined name.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value
    entries, mutating `state_dict` in place.

    The obfuscated original bound every slice to `_lowerCAmelCase`, dropping the
    state-dict writes entirely; target key names restored from the HF ViT
    conversion script — TODO confirm against upstream.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop timm classification-head tensors from `state_dict` in place.

    Uses `pop(k, None)` so missing keys are tolerated (the obfuscated original
    passed the wrong arguments to pop).
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to key `new`, mutating `dct` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response body
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Convert a timm ViT/DeiT checkpoint named `vit_name` to the HF format,
    verify its outputs against the timm model, and save model + image processor
    to `pytorch_dump_folder_path`.

    All local names restored from the obfuscated original, which bound every
    value to `_lowerCAmelCase` and then read undefined names.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            # deit-base uses the ViTConfig defaults
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    # the obfuscated original bound the parsed namespace to `_snake_case` but read `args`
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds small AlbertConfig + dummy inputs for the Flax ALBERT tests.

    Class name restored to match the `FlaxAlbertModelTester(self)` reference in
    the test class below; parameter names restored from the assignment order of
    the obfuscated original.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # original passed undefined `__a`; False per upstream tester — TODO confirm
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into the (config, dict) shape the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_(a, unittest.TestCase):
    """Flax ALBERT model test suite.

    NOTE(review): the source bound the class-attribute tuple to
    `lowerCamelCase__`, but the test below reads `self.all_model_classes`,
    so that is the intended attribute name.  The original tuple also listed
    `FlaxAlbertForQuestionAnswering` twice; the duplicate is removed.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): must be named `setUp` for unittest to run it; the
        # source discarded the tester into a throwaway local.
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke test: every model class loads from the hub and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class UpperCAmelCase_(unittest.TestCase):
    """Integration test: checks a pretrained Flax ALBERT forward pass against known values."""

    @slow
    def test_inference_no_head(self):
        # NOTE(review): method name reconstructed; the source named every
        # method `snake_case__` and bound every value to a throwaway local,
        # leaving `model`/`expected_slice` undefined at their use sites.
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 350
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)


class UpperCAmelCase_(a):
    """Deprecated alias for the VideoMAE image processor.

    Kept only so old `VideoMAEFeatureExtractor` call sites keep working;
    emits a deprecation warning and defers everything to the base class.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): the source declared `*__a, **__a` — duplicate
        # parameter names, which is a SyntaxError.  The warn category was
        # also mangled to `__a`; FutureWarning matches the deprecation
        # message — confirm against the original transformers shim.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
| 0
|
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    """Print every permutation of *sequence*, one list per line.

    NOTE(review): in the source both functions were named `A`, so the
    call sites (`generate_all_permutations`, `create_state_space_tree`)
    referenced undefined names; the names used here are the ones the
    call sites expect.
    """
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])


def create_state_space_tree(
    sequence: list,
    current_sequence: list,
    index: int,
    index_used: list,
) -> None:
    """Depth-first state-space search: extend *current_sequence* with every
    unused element, printing each complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            # Mark element i as taken for the recursive call, then undo on
            # backtrack (the source discarded both flags into a local).
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


_snake_case = [3, 1, 2, 4]
generate_all_permutations(_snake_case)

_snake_case = ["A", "B", "C"]
generate_all_permutations(_snake_case)
| 351
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Nightly GPU test for the legacy ONNX Stable Diffusion inpaint pipeline."""

    @property
    def gpu_provider(self):
        # NOTE(review): source named this property `snake_case__`, but the
        # test body reads `self.gpu_provider` — that is the intended name.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): source bound False to a throwaway local; disabling
        # the memory-pattern optimization matches the diffusers ONNX tests —
        # confirm the attribute name against onnxruntime's SessionOptions.
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        """Run the inpaint pipeline end-to-end and compare against a reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,  # NOTE(review): mangled `__a` in source; None disables the checker
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 300
| 0
|
class UpperCAmelCase_:
    """Weighted undirected graph stored as a nested adjacency dict.

    Reconstructed from mangled source: `__init__`/`add_edge` bound their
    values to throwaway locals instead of attributes, the sort key lambda
    referenced an undefined name, and the method names were all collapsed
    to `snake_case__`; the names below are the ones the internal call sites
    (`self.add_vertex`, `self.get_edges`, `Graph.build`) require.
    """

    def __init__(self):
        self.num_vertices = 0
        # NOTE(review): num_edges is never incremented anywhere in this
        # chunk — present in the original too; kept for compatibility.
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register *vertex* if it has not been seen before."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected weighted edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so every undirected edge has a distinct weight."""
        edges = self.get_edges()
        # Drop the reverse duplicate of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])  # source lambda referenced undefined `e`
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all directed (tail, head, weight) triples (each undirected edge twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a graph from optional vertex and (head, tail, weight) edge lists."""
        g = UpperCAmelCase_()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UpperCAmelCase_:
    """Disjoint-set (union-find) with path compression and union by rank,
    plus a static Borůvka minimum-spanning-tree routine.

    Reconstructed from mangled source; the most serious original defect was
    `union` using the single name `roota` for both roots, making every
    comparison compare a value with itself.
    """

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set for *item* (idempotent)."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of *item*'s set, compressing the path."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets of *item1* and *item2*; return the new root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        # Equal ranks: attach root2 under root1 and bump root1's rank.
        self.rank[root1] += 1
        self.parent[root2] = root1
        return root1

    @staticmethod
    def boruvka(graph):
        """Compute a minimum spanning tree of *graph* via Borůvka's algorithm.

        NOTE(review): `Graph` is not defined in this chunk (the sibling
        class shares this class's mangled name); both `Graph.UnionFind` and
        `Graph.build` were unresolved in the source as well and are kept
        verbatim — resolve against the original module layout.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reverse duplicate of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Tests for CLIPSegProcessor (tokenizer + image-processor wrapper).

    Reconstructed from mangled source: every `self.*` attribute and local
    was bound to a throwaway name (leaving later reads undefined),
    `np.uinta` is not a numpy dtype (must be `np.uint8`), and the
    `assertIsInstance`/`pytest.raises` arguments were erased to `__a`.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        # NOTE(review): the join target was mangled to `__a`; IMAGE_PROCESSOR_NAME
        # (imported at the top of the file) is the intended constant.
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image in HWC layout."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        # from_pretrained defaults to the fast tokenizer.
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        # NOTE(review): exception class was mangled to `__a`; ValueError is the
        # processor convention — confirm against CLIPSegProcessor.__call__.
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 300
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_(a):
    """Speech-to-text tool: transcribes audio with openai/whisper-base."""

    # NOTE(review): the source rebound every attribute to the same mangled
    # name `lowerCamelCase__` (so only the last assignment survived).  The
    # names below follow the PipelineTool attribute protocol in the order
    # the values appeared — confirm against the base class.
    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['audio']
    outputs = ['text']

    def encode(self, audio):
        """Convert raw audio into Whisper input features (PyTorch tensors)."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run generation on the encoded input features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode generated token ids into the transcript string.

        NOTE(review): the source passed the mangled `__a` (the ids
        themselves) as `skip_special_tokens`; True is the intended value.
        """
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
    """Zero-shot classification pipeline tests.

    NOTE(review): this class is systematically mangled and will not run
    as-is.  Concrete defects to restore from the upstream file:
    - every class attribute is bound to `lowerCamelCase__`, yet the guards
      below read `model_mapping` / `tf_model_mapping`;
    - `def snake_case__ ( self, __a, __a, __a)` declares duplicate parameter
      names, which is a SyntaxError;
    - every `_lowerCAmelCase : T = ...` discards its value into a throwaway
      local, leaving later reads (`classifier`, `outputs`, `config`,
      `original_labelaid`, `zero_shot_classifier`) undefined;
    - the `ANY(__a)` / `assertRaises(__a)` arguments were erased.
    """
    # NOTE(review): read below as `model_mapping` / `tf_model_mapping`.
    lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Drop model types whose inputs differ from ordinary text models.
    # NOTE(review): `_TO_SKIP` is the module constant mangled to `_snake_case` above.
    if model_mapping is not None:
        lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        lowerCamelCase__ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def snake_case__ ( self, __a, __a, __a):
        """Build a zero-shot classifier plus example inputs for the common runner."""
        _lowerCAmelCase : str = ZeroShotClassificationPipeline(
            model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def snake_case__ ( self, __a, __a):
        """Exercise the classifier across single/multiple labels and invalid inputs."""
        _lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        # No kwarg
        _lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        _lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        _lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            __a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
        # Scores over candidate labels must form a distribution.
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        _lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            __a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        _lowerCAmelCase : List[Any] = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        # https://github.com/huggingface/transformers/issues/13846
        _lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            __a, [
                {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
                for i in range(1)
            ], )
        _lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            __a, [
                {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
                for i in range(2)
            ], )
        # Invalid inputs must raise (exception classes mangled to `__a`).
        with self.assertRaises(__a):
            classifier("", candidate_labels="politics")
        with self.assertRaises(__a):
            classifier(__a, candidate_labels="politics")
        with self.assertRaises(__a):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(__a):
            classifier("Who are you voting for in 2020?", candidate_labels=__a)
        with self.assertRaises(__a):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(__a):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
        self.run_entailment_id(__a)
    def snake_case__ ( self, __a):
        """Check entailment_id resolution for several label2id layouts, then restore.

        NOTE(review): `config.labelaid` is presumably the mangled
        `config.label2id` — confirm against the upstream test.
        """
        _lowerCAmelCase : Tuple = zero_shot_classifier.model.config
        _lowerCAmelCase : Optional[Any] = config.labelaid
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
        _lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        _lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        _lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        _lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        _lowerCAmelCase : List[str] = original_labelaid
        self.assertEqual(__a, zero_shot_classifier.entailment_id)
    @require_torch
    def snake_case__ ( self):
        """Regression test for very long inputs (tokenizer truncation)."""
        _lowerCAmelCase : Tuple = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
    @require_torch
    def snake_case__ ( self):
        """Tiny-model PyTorch smoke test with fixed expected scores."""
        _lowerCAmelCase : int = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        _lowerCAmelCase : List[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )
    @require_tf
    def snake_case__ ( self):
        """Tiny-model TensorFlow smoke test with fixed expected scores."""
        _lowerCAmelCase : List[str] = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )
    @slow
    @require_torch
    def snake_case__ ( self):
        """Full roberta-large-mnli PyTorch test, single- and multi-label."""
        _lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        _lowerCAmelCase : Optional[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
    @slow
    @require_tf
    def snake_case__ ( self):
        """Full roberta-large-mnli TensorFlow test, single- and multi-label."""
        _lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        _lowerCAmelCase : Dict = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        _lowerCAmelCase : str = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
| 300
| 0
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( product = "laptop" ):
    """Scrape amazon.in search results for `product` and return a DataFrame.

    Columns: Product Title, Product Link, Current Price of the product,
    Product Rating, MRP of the product, Discount (percent; NaN when the MRP
    is missing or unparsable).

    NOTE(review): restored from an obfuscated version that could not run —
    every assignment had been renamed to `_lowerCAmelCase` while later
    expressions still read `product`, `soup`, `data_frame`, `product_title`,
    `product_price`, `product_mrp`, `discount`, …; `item.ha` replaced with
    `item.h2`; scraped rows are now actually appended to the dataframe. The
    post-loop cleanup placement is best-effort — TODO confirm against the
    original script.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}, ), soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}), ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Percentage discount of the current price relative to MRP.
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Entry without the expected markup — skip it entirely rather than
            # appending a row built from stale/unbound values.
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out empty price/MRP cells, then switch to 1-based row numbering.
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): the file as given bound the product name to `_snake_case`
    # but then referenced undefined `product` and `get_amazon_product_data`;
    # the scraper above is (after obfuscation) named `A`.
    product = "headphones"
    A(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 354
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
    """Output container for the VE-SDE scheduler's predictor step.

    NOTE(review): machine-obfuscated — the base `a` is presumably BaseOutput
    and the two identical `lowerCamelCase__` attributes were presumably the
    annotated fields `prev_sample` / `prev_sample_mean` (the scheduler below
    constructs `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`). As
    written, the second assignment merely rebinds the first and @dataclass
    sees no annotated fields — TODO confirm against diffusers' SdeVeOutput.
    """

    # presumably `prev_sample: torch.FloatTensor` — TODO confirm
    lowerCamelCase__ = 42
    # presumably `prev_sample_mean: torch.FloatTensor` — TODO confirm
    lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
    """Variance-exploding (VE) SDE scheduler (score_sde_pytorch style).

    NOTE(review): machine-obfuscated. The bases are the opaque name `a`
    (presumably SchedulerMixin and ConfigMixin — confirm); every method is
    named `snake_case__`, so later definitions shadow earlier ones; several
    signatures repeat the parameter `__a` (a SyntaxError as written); and
    `_lowerCAmelCase : T = ...` locals appear to have replaced
    `self.<attr> = ...` assignments (e.g. `self.timesteps`,
    `self.discrete_sigmas`, `self.sigmas`), so later reads of those names are
    unbound. Comments below describe apparent intent only.
    """

    # presumably the solver `order` exposed to pipelines — TODO confirm
    lowerCamelCase__ = 1
    @register_to_config
    def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
        '''Store the config and precompute sigmas.

        Positional config presumably maps to (num_train_timesteps=2000,
        snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5,
        correct_steps=1) — TODO confirm against diffusers' ScoreSdeVeScheduler.
        '''
        # presumably `self.init_noise_sigma = sigma_max`; `sigma_max` is not a
        # visible name here (all params are `__a`) — NOTE(review)
        _lowerCAmelCase : Dict = sigma_max
        # setable values
        # presumably `self.timesteps = None` (filled by set_timesteps below)
        _lowerCAmelCase : str = None
        self.set_sigmas(__a, __a, __a, __a)
    def snake_case__ ( self, __a, __a = None):
        '''Return the input sample unchanged (no conditioning scaling needed).'''
        # NOTE(review): returns `sample`, but the parameter is named `__a`
        return sample
    def snake_case__ ( self, __a, __a = None, __a = None):
        '''Set the continuous timesteps used by the diffusion chain.

        Presumably (num_inference_steps, sampling_eps, device) — TODO confirm.
        '''
        # NOTE(review): `sampling_eps` is read but the params are all `__a`
        _lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        # presumably `self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps)`
        _lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
    def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
        '''Set the noise scales (sigmas) used by the diffusion chain.

        Presumably (num_inference_steps, sigma_min, sigma_max, sampling_eps);
        falls back to the registered config for omitted values.
        '''
        # NOTE(review): `sigma_min`/`sigma_max`/`sampling_eps` are read but the
        # parameters are all named `__a`
        _lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__a, __a)
        # presumably `self.sigmas`, `self.discrete_sigmas` (log-spaced) and a
        # recomputed `self.sigmas` tensor, as in diffusers — NOTE(review)
        _lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
        _lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def snake_case__ ( self, __a, __a):
        '''Return sigma of the previous discrete step (zeros at timestep 0).'''
        # NOTE(review): reads `timesteps` and `t`, but the params are `__a`
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
        '''Predictor step: propagate the sample by the reverse-time SDE.

        Presumably (model_output, timestep, sample, generator, return_dict);
        returns (prev_sample, prev_sample_mean) or an SdeVeOutput.
        '''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # Broadcast the scalar timestep over the batch dimension.
        _lowerCAmelCase : Dict = timestep * torch.ones(
            sample.shape[0], device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
        _lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
        _lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
        _lowerCAmelCase : List[str] = torch.zeros_like(__a)
        # Diffusion coefficient g(t) of the VE SDE.
        _lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCAmelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            _lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
        _lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCAmelCase : Optional[Any] = randn_tensor(
            sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
        _lowerCAmelCase : int = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
    def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
        '''Corrector step: Langevin-correct the sample using the model score.

        Presumably (model_output, sample, generator, return_dict); returns
        (prev_sample,) or a SchedulerOutput.
        '''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        _lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCAmelCase : List[Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            _lowerCAmelCase : int = step_size.unsqueeze(-1)
        _lowerCAmelCase : List[Any] = sample + step_size * model_output
        _lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__a)
    def snake_case__ ( self, __a, __a, __a, ):
        '''Add sigma-scaled noise to `original_samples` at the given timesteps.

        Presumably (original_samples, noise, timesteps) — TODO confirm.
        '''
        # NOTE(review): reads `timesteps`, `original_samples`, `noise`, but the
        # parameters are all `__a`; `sigmas[:, None, None, None]` assumes 4-D
        # (batch, channels, H, W) samples — TODO confirm
        _lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
        _lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
        _lowerCAmelCase : Any = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__a) * sigmas[:, None, None, None]
        )
        _lowerCAmelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self):
        '''Length of the diffusion chain (number of training timesteps).'''
        return self.config.num_train_timesteps
| 300
| 0
|
"""simple docstring"""
def A ( graph ):
    """Print the number of vertices on the longest path in a DAG.

    `graph` is an adjacency list mapping each vertex 0..n-1 to its successor
    list. Uses Kahn's topological BFS; `long_dist[v]` is the longest chain of
    vertices ending at `v` (so a single vertex counts as 1).

    NOTE(review): restored from an obfuscated version whose assignments were
    all renamed to `_lowerCAmelCase` while the reads kept `graph`, `indegree`,
    `queue`, `long_dist` and `vertex` — it raised NameError as written.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    # Count incoming edges per vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with all source vertices.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# NOTE(review): the file as given called undefined `longest_distance(graph)`;
# the function above is (after obfuscation) named `A` and the adjacency list
# is bound to `_snake_case`.
A(_snake_case)
| 355
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
    """Return a cryptographically random password of the given length,
    drawn from ASCII letters, digits and punctuation."""
    pool = ascii_letters + digits + punctuation
    picks = (secrets.choice(pool) for _ in range(_lowerCamelCase))
    return "".join(picks)
def A ( chars_incl , i ):
    """Generate an `i`-character password guaranteed to contain `chars_incl`.

    The remaining i - len(chars_incl) characters are split roughly in thirds
    between letters, digits and punctuation, then the whole pool is shuffled.

    NOTE(review): two fixes versus the file as given — the signature repeated
    `_lowerCamelCase` (a SyntaxError) while the body already read `chars_incl`
    and `i`, and the body called an undefined `random(...)` helper (the
    generic chooser defined further below was also renamed to `A`).
    """
    def _pick(pool, k):
        # k cryptographically random characters drawn from `pool`
        return "".join(secrets.choice(pool) for _ in range(k))

    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + _pick(ascii_letters, quotient + remainder)
        + _pick(digits, quotient)
        + _pick(punctuation, quotient)
    )
    chars = list(chars)
    shuffle(chars)
    return "".join(chars)
# random is a generalised function for letters, characters and numbers
def A ( ctbl , i ):
    """Return `i` cryptographically random characters drawn from the pool `ctbl`.

    NOTE(review): the signature in the file as given repeated `_lowerCamelCase`,
    which is a SyntaxError; restored to two distinct parameters.
    """
    return "".join(secrets.choice(ctbl) for _ in range(i))
def A ( chars_incl , i ):
    """Placeholder helper (presumably random_letters/random_number/
    random_characters in the original — TODO confirm); not implemented.

    NOTE(review): the signature repeated `_lowerCamelCase` (a SyntaxError);
    restored to two distinct parameters.
    """
    pass  # Put your code here...
def A ( chars_incl , i ):
    """Placeholder helper (presumably random_letters/random_number/
    random_characters in the original — TODO confirm); not implemented.

    NOTE(review): the signature repeated `_lowerCamelCase` (a SyntaxError);
    restored to two distinct parameters.
    """
    pass  # Put your code here...
def A ( chars_incl , i ):
    """Placeholder helper (presumably random_letters/random_number/
    random_characters in the original — TODO confirm); not implemented.

    NOTE(review): the signature repeated `_lowerCamelCase` (a SyntaxError);
    restored to two distinct parameters.
    """
    pass  # Put your code here...
def A ( password , min_length = 8 ):
    """Return True iff `password` is at least `min_length` characters and
    contains an uppercase letter, a lowercase letter, a digit and a
    punctuation character.

    NOTE(review): the signature in the file as given repeated
    `_lowerCamelCase` (a SyntaxError) while the body already read `password`
    and `min_length`; restored.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def A ( ):
    """Interactive driver: prompt for a maximum password length and required
    characters, then print a generated password and an 'alternative' one.

    NOTE(review): as written this cannot run — `password_generator` and
    `alternative_password_generator` are not defined in this file (every
    function here was renamed to `A` by obfuscation), and the two `input()`
    results are bound to `_lowerCAmelCase` while an unbound `_lowerCamelCase`
    is what gets passed to the calls below.
    """
    _lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
    _lowerCAmelCase : Tuple = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(_lowerCamelCase ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
    print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
    # NOTE(review): the file as given called `main()`, which is never defined;
    # the interactive entry point directly above is (after obfuscation) the
    # last function named `A`.
    A()
| 300
| 0
|
import cmath
import math
def A ( voltage , current , voltage_angle , current_angle ):
    """Return the complex apparent power S = V·I for a single-phase AC
    circuit, given RMS magnitudes and phase angles in degrees.

    NOTE(review): the signature in the file as given repeated
    `_lowerCamelCase` four times (a SyntaxError) while the body already read
    the rectangular-form intermediates; restored to the conventional
    (voltage, current, voltage_angle, current_angle) parameters.
    """
    # Degrees -> radians for cmath.rect.
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 356
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: submodule name -> public names it exports.
# NOTE(review): in the file as given every insertion below rebound a single
# name `_snake_case` and the final line referenced an undefined
# `_import_structure`, so importing the module raised NameError. Restored to
# the canonical transformers lazy-module pattern.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Checkpoint -> hosted vocab/tokenizer file URLs.
# NOTE(review): restored the name the class below reads (the obfuscated file
# bound this to `_snake_case`), and fixed two broken URL fragments that could
# never resolve: "aresolve/main" -> "resolve/main" and "tokenizer.jsont" ->
# "tokenizer.json".
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths per checkpoint; the class below reads this name
# (the obfuscated file bound the dict to `_snake_case`).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
# Per-checkpoint tokenizer init kwargs; the class below reads this name
# (the obfuscated file bound the dict to `_snake_case`).
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase_ ( a):
    """Fast (tokenizers-backed) REALM tokenizer.

    NOTE(review): machine-obfuscated — the base `a` is presumably
    PreTrainedTokenizerFast (see the import above); every method is named
    `snake_case__`, so later defs shadow earlier ones; several signatures
    repeat `__a` (a SyntaxError as written); and `_lowerCAmelCase : T = ...`
    locals appear to have replaced real assignments (e.g.
    `self.do_lower_case = ...`). Expects the module-level constants
    VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
    PRETRAINED_INIT_CONFIGURATION and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    to be defined above.
    """

    lowerCamelCase__ = VOCAB_FILES_NAMES
    lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ = RealmTokenizer
    def __init__( self, __a=None, __a=None, __a=True, __a="[UNK]", __a="[SEP]", __a="[PAD]", __a="[CLS]", __a="[MASK]", __a=True, __a=None, **__a, ):
        '''Initialize and, if the stored normalizer options disagree with the
        requested lowercase/strip-accents/Chinese-chars settings, rebuild the
        backend normalizer accordingly.'''
        super().__init__(
            __a, tokenizer_file=__a, do_lower_case=__a, unk_token=__a, sep_token=__a, pad_token=__a, cls_token=__a, mask_token=__a, tokenize_chinese_chars=__a, strip_accents=__a, **__a, )
        # Serialized state of the backend normalizer (a JSON dict).
        _lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # NOTE(review): reads `normalizer_state`, `do_lower_case`, etc., but
        # the parameters are all named `__a` in this obfuscated version.
        if (
            normalizer_state.get("lowercase", __a) != do_lower_case
            or normalizer_state.get("strip_accents", __a) != strip_accents
            or normalizer_state.get("handle_chinese_chars", __a) != tokenize_chinese_chars
        ):
            _lowerCAmelCase : Dict = getattr(__a, normalizer_state.pop("type"))
            _lowerCAmelCase : int = do_lower_case
            _lowerCAmelCase : Dict = strip_accents
            _lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
            _lowerCAmelCase : Any = normalizer_class(**__a)
        # presumably `self.do_lower_case = do_lower_case` — NOTE(review)
        _lowerCAmelCase : List[Any] = do_lower_case
    def snake_case__ ( self, __a, **__a):
        '''Encode a batch of candidate texts (REALM's batch_encode_candidates):
        each example is padded to max_length and the per-candidate encodings
        are stacked into input_ids / attention_mask / token_type_ids lists.'''
        # Candidates are always padded to max_length for fixed-shape batches.
        _lowerCAmelCase : Dict = PaddingStrategy.MAX_LENGTH
        _lowerCAmelCase : Union[str, Any] = text
        _lowerCAmelCase : Any = kwargs.pop("text_pair", __a)
        _lowerCAmelCase : Tuple = kwargs.pop("return_tensors", __a)
        # Accumulators for the stacked per-candidate encodings.
        _lowerCAmelCase : List[str] = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(__a):
            if batch_text_pair is not None:
                _lowerCAmelCase : Optional[Any] = batch_text_pair[idx]
            else:
                _lowerCAmelCase : str = None
            _lowerCAmelCase : Union[str, Any] = super().__call__(__a, __a, return_tensors=__a, **__a)
            _lowerCAmelCase : Union[str, Any] = encoded_candidates.get("input_ids")
            _lowerCAmelCase : List[Any] = encoded_candidates.get("attention_mask")
            _lowerCAmelCase : Optional[int] = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(__a)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(__a)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(__a)
        # Drop any field that stayed empty across all candidates.
        _lowerCAmelCase : Optional[int] = {key: item for key, item in output_data.items() if len(__a) != 0}
        return BatchEncoding(__a, tensor_type=__a)
    def snake_case__ ( self, __a, __a=None):
        '''Build model inputs with special tokens: [CLS] A [SEP] (+ B [SEP]).

        NOTE(review): obfuscation collapsed `token_ids_0`/`token_ids_1` into a
        single name `token_ids_a`, so both branches read the same sequence.
        '''
        _lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def snake_case__ ( self, __a, __a = None):
        '''Create token type IDs: 0s for [CLS] A [SEP], 1s for B [SEP].

        NOTE(review): same `token_ids_a` collapse as above — both the "first"
        and "second" sequences read one name.
        '''
        _lowerCAmelCase : Dict = [self.sep_token_id]
        _lowerCAmelCase : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
    def snake_case__ ( self, __a, __a = None):
        '''Save the tokenizer model files to a directory; returns the paths.'''
        _lowerCAmelCase : str = self._tokenizer.model.save(__a, name=__a)
        return tuple(__a)
| 357
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# NOTE(review): the function below reads these two names, but the obfuscated
# file bound both values to `_snake_case` (the second rebinding the first).
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def A ( force , area , distance ):
    """Solve the Casimir equation F = (ℏ·c·π²·A) / (240·d⁴) for whichever of
    the three quantities is given as 0.

    Exactly one of `force`, `area`, `distance` must be 0; that quantity is
    computed from the other two and returned as a single-entry dict.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is
            negative.

    NOTE(review): the signature in the file as given repeated
    `_lowerCamelCase` three times (a SyntaxError) while the body already read
    `force`, `area` and `distance`; restored.
    """
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 300
| 0
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_snake_case = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase_ :
    """Test helper that builds Autoformer configs/inputs and checks that a
    standalone encoder/decoder reproduce the full model's hidden states.

    NOTE(review): machine-obfuscated — `__init__` repeats the parameter `__a`
    (a SyntaxError as written), methods share the name `snake_case__` (later
    defs shadow earlier ones), and `_lowerCAmelCase : T = ...` locals appear
    to have replaced `self.<attr> = ...` assignments (the methods below read
    `self.d_model`, `self.batch_size`, etc.). Comments describe apparent
    intent only.
    """

    def __init__( self, __a, __a=16, __a=13, __a=7, __a=14, __a=10, __a=19, __a=5, __a=4, __a=True, __a=16, __a=2, __a=4, __a=4, __a="gelu", __a=0.1, __a=0.1, __a=[1, 2, 3, 4, 5], __a=25, __a=5, ):
        '''Record tester hyperparameters (batch size, lengths, model dims...).

        NOTE(review): each line below presumably assigned to `self.<name>`;
        as written the locals are discarded and the right-hand names unbound.
        '''
        _lowerCAmelCase : int = d_model
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : int = batch_size
        _lowerCAmelCase : Optional[int] = prediction_length
        _lowerCAmelCase : Tuple = context_length
        _lowerCAmelCase : List[Any] = cardinality
        _lowerCAmelCase : Union[str, Any] = num_time_features
        _lowerCAmelCase : Any = lags_sequence
        _lowerCAmelCase : Optional[Any] = embedding_dimension
        _lowerCAmelCase : Optional[Any] = is_training
        _lowerCAmelCase : Optional[Any] = hidden_size
        _lowerCAmelCase : Optional[int] = num_hidden_layers
        _lowerCAmelCase : Optional[int] = num_attention_heads
        _lowerCAmelCase : List[str] = intermediate_size
        _lowerCAmelCase : Any = hidden_act
        _lowerCAmelCase : List[Any] = hidden_dropout_prob
        _lowerCAmelCase : Tuple = attention_probs_dropout_prob
        _lowerCAmelCase : str = context_length
        _lowerCAmelCase : Tuple = prediction_length + label_length
        _lowerCAmelCase : List[Any] = label_length
        _lowerCAmelCase : Optional[Any] = moving_average
        _lowerCAmelCase : List[str] = autocorrelation_factor
    def snake_case__ ( self):
        '''Build an AutoformerConfig from the recorded hyperparameters.'''
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
    def snake_case__ ( self, __a):
        '''Build random past/future tensors shaped for the given config.

        NOTE(review): the locals below presumably carried the names used in
        the returned dict (`past_values`, `past_time_features`, ...), and
        `_past_length` is read without being bound as written.
        '''
        _lowerCAmelCase : Any = config.context_length + max(config.lags_sequence)
        _lowerCAmelCase : List[str] = ids_tensor([self.batch_size, 1], config.cardinality[0])
        _lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        _lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length])
        # Random boolean observation mask over the past window.
        _lowerCAmelCase : str = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        _lowerCAmelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        _lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, config.prediction_length])
        _lowerCAmelCase : List[str] = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def snake_case__ ( self):
        '''Return (config, inputs_dict) for a fresh random test case.'''
        _lowerCAmelCase : Dict = self.get_config()
        _lowerCAmelCase : Dict = self.prepare_autoformer_inputs_dict(__a)
        return config, inputs_dict
    def snake_case__ ( self):
        '''Alias used by the common test mixin; delegates to the above.'''
        _lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict
    def snake_case__ ( self, __a, __a):
        '''Check that a saved/reloaded standalone encoder and decoder
        reproduce the full model's encoder and decoder hidden states
        (within 1e-3), including the seasonal/trend decomposition inputs.'''
        _lowerCAmelCase : List[Any] = AutoformerModel(config=__a).to(__a).eval()
        _lowerCAmelCase : List[str] = model(**__a)
        _lowerCAmelCase : List[Any] = outputs.encoder_last_hidden_state
        _lowerCAmelCase : str = outputs.last_hidden_state
        # Round-trip the encoder through save_pretrained/from_pretrained.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Union[str, Any] = model.get_encoder()
            encoder.save_pretrained(__a)
            _lowerCAmelCase : Optional[int] = AutoformerEncoder.from_pretrained(__a).to(__a)
        _lowerCAmelCase : Dict = model.create_network_inputs(**__a)
        _lowerCAmelCase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        # Encoder input = context-window values concatenated with features.
        _lowerCAmelCase : Dict = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        _lowerCAmelCase : Tuple = encoder(inputs_embeds=__a)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)
        # Mean of the context window, repeated over the prediction horizon
        # (decoder trend initialization).
        _lowerCAmelCase : Optional[Any] = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        _lowerCAmelCase : List[Any] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        # Decoder seasonal input: label-window seasonality padded with zeros.
        _lowerCAmelCase : Optional[int] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        # Decoder trend input: label-window trend padded with the mean.
        _lowerCAmelCase : Any = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        # Round-trip the decoder the same way and compare final states.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : int = model.get_decoder()
            decoder.save_pretrained(__a)
            _lowerCAmelCase : Tuple = AutoformerDecoder.from_pretrained(__a).to(__a)
        _lowerCAmelCase : Any = decoder(
            trend=__a, inputs_embeds=__a, encoder_hidden_states=__a, )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCamelCase__ = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCamelCase__ = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = AutoformerModelTester(self)
_lowerCAmelCase : List[str] = ConfigTester(self, config_class=__a, has_text_modality=__a)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a)
_lowerCAmelCase : Any = model_class.from_pretrained(__a, output_loading_info=__a)
self.assertEqual(info["missing_keys"], [])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a)
@unittest.skip(reason="Model has no tokens embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = inspect.signature(getattr(__a, "forward"))
# The main input is the name of the argument after `self`
_lowerCAmelCase : Union[str, Any] = list(model_signature.parameters.keys())[1]
self.assertEqual(AutoformerModel.main_input_name, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__a)
_lowerCAmelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : int = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask")
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
])
self.assertListEqual(arg_names[: len(__a)], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = getattr(self.model_tester, "seq_length", __a)
_lowerCAmelCase : Union[str, Any] = getattr(self.model_tester, "decoder_seq_length", __a)
_lowerCAmelCase : Tuple = getattr(self.model_tester, "encoder_seq_length", __a)
_lowerCAmelCase : Optional[Any] = getattr(self.model_tester, "d_model", __a)
_lowerCAmelCase : List[Any] = getattr(self.model_tester, "num_attention_heads", __a)
_lowerCAmelCase : int = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : str = True
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[Any] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : Any = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
_lowerCAmelCase : Dict = len(__a)
_lowerCAmelCase : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a, __a)
# decoder attentions
_lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertIsInstance(__a, (list, tuple))
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# cross attentions
_lowerCAmelCase : List[Any] = outputs.cross_attentions
self.assertIsInstance(__a, (list, tuple))
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# Check attention is always last and order is fine
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Optional[int] = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(__a, __a))
self.assertEqual(out_len + 2, len(__a))
_lowerCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
@is_flaky()
def snake_case__ ( self):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def A ( _lowerCamelCase="train-batch.pt" ):
    """Download a pre-built Autoformer batch from the Hub and load it with torch.

    Bug fix: the obfuscated line passed the *filename* as ``map_location``
    (``torch.load(file, map_location=<filename>)``), which torch would try to
    interpret as a device string and fail on.
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCamelCase , repo_type="dataset" )
    # NOTE(review): torch_device assumed imported at module level
    # (transformers.testing_utils) — confirm against the file header.
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase):
    # Slow integration tests against the pretrained
    # "huggingface/autoformer-tourism-monthly" checkpoint.
    #
    # NOTE(review): obfuscation left `__a` unresolved throughout this class
    # (`.to(__a)`, `device=__a`, `atol=__a`) — presumably `torch_device` and a
    # module-level tolerance constant — and rebound the assignment targets
    # (`model`, `batch`, `output`, `mean_prediction`, the expected shapes and
    # slices) to throwaway locals, so the later references are NameErrors.
    # Confirm against the upstream test file before relying on this class.
    def snake_case__ ( self):
        """Forward AutoformerModel on a real batch; check output shape and a 3x3 value slice."""
        _lowerCAmelCase : Any = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(__a)
        _lowerCAmelCase : List[Any] = prepare_batch()
        with torch.no_grad():
            _lowerCAmelCase : Optional[Any] = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        _lowerCAmelCase : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, __a)
        _lowerCAmelCase : List[str] = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]], device=__a)
        self.assertTrue(torch.allclose(output[0, :3, :3], __a, atol=__a))
    def snake_case__ ( self):
        """Encoder-only pass of AutoformerForPrediction; check the last hidden state."""
        _lowerCAmelCase : Any = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(__a)
        _lowerCAmelCase : Union[str, Any] = prepare_batch("val-batch.pt")
        with torch.no_grad():
            _lowerCAmelCase : str = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        _lowerCAmelCase : Dict = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, __a)
        _lowerCAmelCase : Any = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]], device=__a)
        self.assertTrue(torch.allclose(output[0, :3, :3], __a, atol=__a))
    def snake_case__ ( self):
        """Generate prediction sequences; check shape and the mean prediction tail."""
        _lowerCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(__a)
        _lowerCAmelCase : Optional[Any] = prepare_batch("val-batch.pt")
        with torch.no_grad():
            _lowerCAmelCase : Union[str, Any] = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        _lowerCAmelCase : Dict = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, __a)
        _lowerCAmelCase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=__a)
        _lowerCAmelCase : Optional[int] = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], __a, rtol=1E-1))
| 358
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
    # Arguments controlling which pretrained wav2vec2 model is loaded and how
    # the gumbel-softmax temperature is scheduled during pretraining.
    #
    # NOTE(review): obfuscation collapsed every field name to `lowerCamelCase__`
    # (so later assignments shadow earlier ones) and stripped the type
    # annotations a @dataclass needs to register fields; `default=a` presumably
    # stood for `default=None`/a boolean. The call sites in `main` read
    # `model_name_or_path`, `cache_dir`, `verbose_logging`,
    # `max_gumbel_temperature`, `min_gumbel_temperature` and
    # `gumbel_temperature_decay` — restore those names against upstream.
    lowerCamelCase__ = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
    lowerCamelCase__ = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( model_args , training_args ):
    """Configure logging for the pretraining script.

    Bug fix: the obfuscated signature declared both parameters with the same
    name (``_lowerCamelCase``), which is a SyntaxError; the body reads
    ``model_args.verbose_logging`` and ``training_args.local_rank``, so those
    names are restored.

    Args:
        model_args: parsed model arguments; ``verbose_logging`` selects DEBUG.
        training_args: parsed training arguments; ``local_rank`` decides whether
            this is the main process (INFO) or a worker (WARNING).
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class UpperCAmelCase_ :
    # Arguments describing the dataset to pretrain on and how to preprocess it.
    #
    # NOTE(review): as with the other dataclasses in this file, obfuscation
    # collapsed all field names to `lowerCamelCase__` and removed the
    # annotations required for @dataclass to register them; `default=a`
    # presumably stood for `default=None`/`False`. `main` reads `dataset_name`,
    # `dataset_config_name`, `train_split_name`, `validation_split_percentage`,
    # `speech_file_column`, `overwrite_cache`, `preprocessing_num_workers` and
    # `max_duration_in_seconds` — restore those names against upstream.
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    lowerCamelCase__ = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    lowerCamelCase__ = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    lowerCamelCase__ = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    lowerCamelCase__ = field(
        default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
    # Data collator for wav2vec2 pretraining: pads raw audio features and
    # samples the boolean time-step mask (`mask_time_indices`) the model needs.
    #
    # NOTE(review): the `42` placeholders presumably stood for the
    # model / feature-extractor type annotations; obfuscation also rebound the
    # assignment targets in `__call__` (`batch`, `mask_indices_seq_length`,
    # `batch_size`, `output_lengths`, `attention_mask`, the indexed
    # `attention_mask[...] = 1` write, and the final
    # `batch["mask_time_indices"]`), so several later references are
    # NameErrors. Restore against the upstream pretraining script.
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = "longest"
    lowerCamelCase__ = None
    lowerCamelCase__ = None
    def __call__( self, __a):
        """Pad the raw features, derive the post-convolution attention mask, and
        sample random masked time indices for the contrastive pretraining loss."""
        _lowerCAmelCase : Any = self.feature_extractor.pad(
            __a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # length of the feature-extractor output for the padded input length
        _lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        _lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            _lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            _lowerCAmelCase : Dict = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            _lowerCAmelCase : List[str] = 1
            _lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        _lowerCAmelCase : Optional[Any] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
        return batch
class UpperCAmelCase_ ( a):
    """Trainer subclass for wav2vec2 pretraining that decays the gumbel-softmax
    temperature after every optimizer update.

    Reconstructed from obfuscation: the original ``__init__`` declared four
    keyword parameters with the same name (a SyntaxError), and attribute
    assignments were rebound to throwaway locals. Parameter/attribute names are
    restored from the keyword call site in ``main`` (``max_gumbel_temp=…``) and
    the reads inside the step method (``self.num_update_step`` …).
    NOTE(review): the step method keeps its obfuscated name ``snake_case__``;
    it was presumably the ``training_step`` override — confirm upstream.
    """

    def __init__( self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def snake_case__ ( self, model, inputs):
        """Run one training step and return the detached loss tensor."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            # model is wrapped (DataParallel/deepspeed): reduce per-replica losses
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def A ( ):
    """Entry point of the wav2vec2 pretraining script: parse arguments, load and
    preprocess the dataset, build the model/collator/trainer, and train.

    Reconstructed from obfuscation: every assignment target had been rebound to
    a throwaway local; names are restored from their later uses in this block
    (``datasets.keys()``, ``vectorized_datasets.filter`` …), and the filter
    lambda's parameter (previously a NameError on ``data``) is fixed.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )

    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )

    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()


# Preserve the name the script guard below expects (the def was renamed to `A`
# by obfuscation).
main = A

if __name__ == "__main__":
    main()
| 300
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_snake_case = "\nHuman: <<task>>\n\nAssistant: "
_snake_case = "huggingface-tools/default-prompts"
_snake_case = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def A ( prompt_or_repo_id , agent_name , mode="run" ):
    """Return the agent prompt text.

    If the argument contains whitespace it is treated as a literal prompt and
    returned as-is; otherwise it is treated as a Hub repo id (falling back to
    ``DEFAULT_PROMPTS_REPO`` when ``None``) whose prompt-template file is
    downloaded and read.

    Bug fixes: the obfuscated signature declared three parameters with the same
    name (a SyntaxError), and the ``None`` fallback assigned
    ``DEFAULT_PROMPTS_REPO`` to a throwaway local instead of rebinding the
    parameter, so ``re.search`` would have crashed on ``None``.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
| 359
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
    """Scrape Amazon.in search results for a product into a pandas DataFrame.

    NOTE(review): heavy obfuscation damage in this function — the ``bsa``
    import above was presumably ``bs4``, ``item.ha`` presumably ``item.h2``
    (digits were letter-substituted elsewhere in this file, e.g. WavaVeca),
    the URL f-string reads an undefined ``product`` (the parameter was
    renamed), and every ``_lowerCAmelCase : … =`` line originally bound the
    names used later (soup, data_frame, product_title, product_link,
    product_price, product_rating, product_mrp, discount), so those later
    references are NameErrors. Restore against upstream before use.
    """
    _lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
    # Browser-like headers so the request is not rejected outright.
    _lowerCAmelCase : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
    # Initialize a Pandas dataframe with the column titles
    _lowerCAmelCase : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            _lowerCAmelCase : Any = item.ha.text
            _lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
            _lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                _lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # rating element missing for this listing
                _lowerCAmelCase : str = "Not available"
            try:
                _lowerCAmelCase : Optional[Any] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # no list price shown
                _lowerCAmelCase : Optional[Any] = ""
            try:
                # discount = (mrp - price) / mrp * 100
                _lowerCAmelCase : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                _lowerCAmelCase : Optional[Any] = float("nan" )
        except AttributeError:
            pass
        _lowerCAmelCase : Any = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _lowerCAmelCase : List[str] = " "
        _lowerCAmelCase : Tuple = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 0
|
def A ( _lowerCamelCase = 50 ):
    """Count the ways a row of the given length can be tiled with unit squares
    and tiles of length 2, 3 and 4 (Project Euler 117 style dynamic program).

    Bug fix: the obfuscated body read undefined names ``length`` and
    ``ways_number`` — the parameter and the DP table binding are restored.

    Args:
        _lowerCamelCase: the row length (defaults to the puzzle's 50).
    Returns:
        int: number of distinct tilings.
    """
    # ways_number[r] starts at 1: the all-squares tiling of a row of length r.
    ways_number = [1] * (_lowerCamelCase + 1)
    for row_length in range(_lowerCamelCase + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[_lowerCamelCase]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 360
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model , ckpt_dir , model_name ):
    """Export a PyTorch BERT ``state_dict`` as a TF1 checkpoint under ``ckpt_dir``.

    Reconstructed from obfuscation: the original signature declared three
    parameters with the same name (a SyntaxError); parameter names are restored
    from the keyword call site in ``main`` (``model=…, ckpt_dir=…,
    model_name=…``), and the nested helpers' mangled locals are restored.

    Args:
        model: a ``BertModel`` whose weights are exported.
        ckpt_dir: output directory (created if missing).
        model_name: used for the ``<model_name>.ckpt`` file name.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (pytorch substring, tensorflow substring) rewrite rules, applied in order
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()

    def to_tf_var_name(name ):
        # map a pytorch parameter name to its tensorflow variable name
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"

    def create_tf_var(tensor , name , session ):
        # declare a zero-initialized TF variable matching the tensor's dtype/shape
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            # dense/attention kernels are stored transposed in TF
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )


def main( raw_args=None ):
    """Parse CLI arguments, load the pytorch BERT model, and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )


# Preserve the module-level binding the obfuscated file ended up with
# (both defs were named `A`; the second shadowed the first).
A = main

if __name__ == "__main__":
    main()
| 300
| 0
|
from __future__ import annotations
def A ( _lowerCamelCase ):
    """Return the equivalent resistance of resistors connected in parallel.

    Improvement: use ``enumerate`` instead of a manually maintained index, and
    raise directly with an f-string instead of binding the message first.

    Args:
        _lowerCamelCase: iterable of resistor values (ohms), all > 0.
    Returns:
        float: 1 / sum(1/r for each resistor).
    Raises:
        ValueError: if any resistor is zero or negative.
    """
    first_sum = 0.00
    for index, resistor in enumerate(_lowerCamelCase ):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!" )
        first_sum += 1 / float(resistor )
    return 1 / first_sum
def A ( _lowerCamelCase ):
    """Return the equivalent resistance of resistors connected in series.

    Improvement: use ``enumerate`` instead of a manually maintained index, and
    validate each resistor before accumulating it (the original added first and
    raised afterwards — observable behavior is identical, since a negative
    value always raises).

    Args:
        _lowerCamelCase: iterable of resistor values (ohms), all >= 0.
    Returns:
        float: the sum of all resistor values.
    Raises:
        ValueError: if any resistor is negative.
    """
    sum_r = 0.00
    for index, resistor in enumerate(_lowerCamelCase ):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!" )
        sum_r += resistor
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
class UpperCAmelCase_ :
    """Undirected weighted graph with Borůvka's minimum-spanning-tree algorithm.

    Reconstructed from obfuscation: every method had been renamed to
    ``snake_case__`` (so later defs shadowed earlier ones), attribute
    assignments were rebound to throwaway locals, the sort lambda read an
    undefined ``e``, ``build`` declared two parameters with the same name
    (a SyntaxError), and the union-find helper had been flattened out of this
    class while ``boruvka`` still referenced ``Graph.UnionFind``. Names are
    restored from the call sites visible in this block (``self.add_vertex``,
    ``graph.get_edges``, ``union_find.find`` …), with ``Graph`` spelled as the
    visible class name.
    """

    def __init__( self):
        """Create an empty graph."""
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[head][tail] = weight; every edge is stored in both directions
        self.adjacency = {}

    def add_vertex( self, vertex):
        """Register a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge( self, head, tail, weight):
        """Add an undirected weighted edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight( self):
        """Perturb equal edge weights so all weights are distinct
        (Borůvka assumes unique weights)."""
        edges = self.get_edges()
        # drop the mirrored duplicate of each edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__( self):
        """Human-readable edge list, one ``head -> tail == weight`` per line."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges( self):
        """Return every edge as ``(tail, head, weight)``; each appears twice."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices( self):
        """Return a view over all vertices."""
        return self.adjacency.keys()

    @staticmethod
    def build( vertices=None, edges=None):
        """Build a graph from a vertex list and ``(head, tail, weight)`` edges."""
        g = UpperCAmelCase_()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__( self):
            self.parent = {}
            self.rank = {}

        def __len__( self):
            return len(self.parent)

        def make_set( self, item):
            """Create a singleton set for ``item`` (idempotent)."""
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find( self, item):
            """Return the representative of ``item``'s set, compressing the path."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union( self, itema, itemb):
            """Merge the two items' sets and return the surviving root."""
            roota = self.find(itema)
            rootb = self.find(itemb)
            if roota == rootb:
                return roota
            if self.rank[roota] > self.rank[rootb]:
                self.parent[rootb] = roota
                return roota
            if self.rank[roota] < self.rank[rootb]:
                self.parent[roota] = rootb
                return rootb
            # equal ranks: keep roota and bump its rank
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota

    @staticmethod
    def boruvka( graph):
        """Return the minimum spanning tree of ``graph``
        (edge weights assumed distinct — see ``distinct_weight``)."""
        num_components = graph.num_vertices
        union_find = UpperCAmelCase_.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # drop the mirrored duplicate of each edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                seta = union_find.find(head)
                setb = union_find.find(tail)
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = UpperCAmelCase_.build(edges=mst_edges)
        return mst
| 300
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A ( workflow_run_id , token=None ):
    """Return ``{job name: html url}`` for all jobs of a GitHub Actions run,
    following pagination (100 jobs per page).

    Bug fix: the obfuscated signature declared both parameters with the same
    name (a SyntaxError); locals (``headers``, ``url``, ``job_links``) are
    restored from their uses.

    Args:
        workflow_run_id: the Actions run to query.
        token: optional GitHub token for authenticated requests.
    Returns:
        dict mapping job name to its html url; ``{}`` on any error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
    return {}
def A ( worflow_run_id , token=None ):
    """Return ``{artifact name: archive download url}`` for a GitHub Actions
    run, following pagination (100 artifacts per page).

    Bug fix: the obfuscated signature declared both parameters with the same
    name (a SyntaxError). The first parameter keeps the ``worflow_run_id``
    spelling already present in the URL f-string.

    Args:
        worflow_run_id: the Actions run to query.
        token: optional GitHub token for authenticated requests.
    Returns:
        dict mapping artifact name to its download url; ``{}`` on any error.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
    return {}
def A ( artifact_name , artifact_url , output_dir , token ):
    """Download one workflow artifact zip into ``output_dir``.

    Bug fix: the obfuscated signature declared four parameters with the same
    name (a SyntaxError), and both ``allow_redirects`` flags had been replaced
    by that name. The first request must NOT follow the redirect (we need the
    ``Location`` header GitHub returns); the second follows it to fetch the
    actual archive bytes.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"{artifact_name}.zip" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors and failed tests from a downloaded test-report artifact (a zip).

    Returns a list of `[error_line, error, failed_test, job_link]` entries;
    `job_link` is looked up from `job_links` via the job name stored in the
    artifact, or None.
    """
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Collect error entries from every `*.zip` test-report artifact in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Group `logs` (entries `[error_line, error, failed_test, ...]`) by error message.

    Returns {error: {"count": n, "failed_tests": [(failed_test, error_line), ...]}},
    sorted by count descending. Errors listed in `error_filter` are skipped.
    """
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    # Sort on the item being compared (the original keyed on a leaked global `item`).
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Return the model name from a test id like `tests/models/<model>/...::...`, else None."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Group `logs` by model (extracted from each failed test path).

    Entries whose test does not belong to a model are dropped. Returns
    {model: {"count": total errors, "errors": {error: count}}}, sorted by
    count descending; errors in `error_filter` are excluded.
    """
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    # Sort on the item being compared (the original keyed on a leaked global `item`).
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error summary as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        # Truncate very long error messages so the table stays readable.
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary (error total plus most frequent error) as Markdown."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        # `errors` is ordered most-common-first, so the first item is the major error.
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    # Write each table to its own file (the original wrote one variable to both).
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 362
|
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of gas molecules, vrms = sqrt(3RT / M).

    `temperature` is in kelvin and `molar_mass` in kg/mol; the result is m/s.
    Raises for a negative temperature or a non-positive molar mass.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: vrms of nitrogen gas (molar mass taken as 28) at 300 K
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 300
| 0
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the starting indices in `s` where `pattern` occurs.

    Naive O(len(s) * len(pattern)) sliding comparison.
    """
    pat_len = len(pattern)
    position = []
    # Try every alignment of the pattern that fits inside s.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    # Sanity check, then a demo search over a longer text.
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration for a Wav2Vec2 model.

    Covers the convolutional feature encoder, the transformer encoder,
    SpecAugment masking, codevector quantization for pretraining, and the
    CTC / adapter / classification / XVector heads.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks of the ONNX Stable Diffusion img2img pipeline with various schedulers."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic dummy inputs shared by every scheduler test."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the full Stable Diffusion ONNX checkpoints."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 364
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Menus are rendered differently in Colab (plain `input()` instead of raw
# terminal key handling), so detect it once at import time.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """An interactive terminal menu: arrow/number keys move a bullet, enter selects.

    `run()` returns the index of the chosen entry.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        """Print the choice at `index`, colored green on non-Windows terminals."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the choice at `index`, with the arrow marker if it is selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the selection `num_spaces` entries up or down, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the entry whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, loop on input, and return the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu (all choices plus the prompt line) before returning.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 300
| 0
|
from math import factorial
def solution(num: int = 100) -> int:
    """Project Euler 20: return the sum of the decimal digits of num!.

    >>> solution(10)
    27
    """
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
    # Read an integer from stdin and print the digit sum of its factorial.
    print(solution(int(input("Enter the Number: ").strip())))
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Build the lazy import structure, adding optional submodules only when their
# backends (vision / torch / flax) are available.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection: score free-text candidate labels against an
    image and return scored bounding boxes. PyTorch only.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Detect objects described by `candidate_labels` (list or comma-separated str) in `image`."""
        if "text_queries" in kwargs:
            # Legacy argument name for `candidate_labels`.
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        # One chunk per candidate label; `is_last` tells the chunk pipeline
        # when this image's chunks are exhausted.
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Strip bookkeeping keys before the model call, then re-attach them.
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        """Convert a tensor `[xmin, ymin, xmax, ymax]` into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 366
|
from __future__ import annotations
from typing import Any
class Matrix:
    """A dense matrix over Python numbers supporting +, -, * (scalar and matrix),
    transpose, and the Sherman-Morrison inverse-update formula.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a `row x column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        """Render the matrix with right-aligned, equal-width cells."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: the widest element sets the cell width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff `loc` is a (row, column) pair inside the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Apply the Sherman-Morrison formula.

        Given `self` = A^-1 and column vectors `u`, `v`, return (A + u v^T)^-1,
        or None when the update makes the matrix non-invertible.
        """
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke test: build an identity, apply a Sherman-Morrison update and print."""
        # a^(-1) starts as the 3x3 identity.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests, then the smoke test."""
        import doctest

        doctest.testmod()
        test1()

    test2()
| 300
| 0
|
# Lint as: python3
import itertools
import os
import re
_snake_case = re.compile(R"([A-Z]+)([A-Z][a-z])")
_snake_case = re.compile(R"([a-z\d])([A-Z])")
_snake_case = re.compile(R"(?<!_)_(?!_)")
_snake_case = re.compile(R"(_{2,})")
_snake_case = R"^\w+(\.\w+)*$"
_snake_case = R"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert a camel-case dataset name to snake-case.

    Name restored (it is called as ``camelcase_to_snakecase`` below); the
    patterns are inlined so the function does not depend on module constants.
    """
    # Split runs of capitals from a following capitalized word, then split
    # lower/digit -> upper boundaries, chaining the two substitutions.
    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
    name = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert a snake-case name to camel-case, preserving multi-underscore runs.

    Single underscores are removed; runs of 2+ underscores are kept verbatim
    (the second split's capturing group retains them in the token stream).
    """
    parts = re.split(r"(?<!_)_(?!_)", name)
    parts = [re.split(r"(_{2,})", n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def filename_prefix_for_name(name):
    """Snake-case filename prefix for a dataset *name*.

    Raises:
        ValueError: if *name* contains path separators (must be a bare name).
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Filename prefix ``<snake_name>-<split>`` for one dataset split.

    Raises:
        ValueError: if *name* is a path, or *split* does not match ``_split_re``.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Glob pattern matching all shard files of one dataset split.

    Returns ``<data_dir>/<prefix>[.<suffix>]*``.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete filenames for one dataset split.

    With *shard_lengths*, one name per shard in the 5-digit
    ``-00000-of-00010`` style; otherwise a single-element list.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 367
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas loader.

    Fix: the single field was mangled to ``lowerCamelCase__`` although the
    builder reads ``self.config.features``.
    """

    # Optional schema to cast loaded tables to.
    features: Optional[datasets.Features] = None


# Name used by the builder below (`BUILDER_CONFIG_CLASS = PandasConfig`).
PandasConfig = UpperCAmelCase_
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that loads pickled pandas DataFrames.

    Fix: the builder hooks were all mangled to colliding ``snake_case__``
    names, so `datasets` never found `_info` / `_split_generators` /
    `_generate_tables`; restored, with locals renamed back.
    """

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata; features come from the config when provided."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """One SplitGenerator per entry of data_files (TRAIN for a bare str/list)."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast to the configured schema when features are given."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) for every pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 300
| 0
|
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (sqrt avoided).

    Fix: the mangled version subtracted each coordinate from itself and
    therefore always returned 0.
    """
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array, column=0):
    """Return *array* sorted by the given coordinate column.

    Fix: the mangled sort key referenced an undefined name `x`.
    """
    return sorted(array, key=lambda point: point[column])
def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force minimum squared distance among the first *points_counts* points.

    O(n^2); used as the base case of the divide-and-conquer search.
    """
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance within the dividing strip.

    Each point is compared with at most the 6 preceding strip points, which
    suffices for points sorted along the strip.
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer minimum squared distance over *points_counts* points."""
    # Base case: brute force for up to 3 points.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)
    # Points within closest_pair_dis of the dividing vertical line.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)
def closest_pair_of_points(points, points_counts):
    """Euclidean distance of the closest pair among *points*.

    Sorts by x and by y once, then runs the squared-distance recursion and
    takes the square root at the end.
    """
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    # Demo: print the distance of the closest pair in a small sample set.
    # Fix: the list was bound to `_snake_case` while the print used `points`.
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 368
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
    """Shared TF model/pipeline test suite for the MobileBERT heads.

    NOTE(review): the two `a` bases are presumably the project's model- and
    pipeline-tester mixins (names mangled) — confirm against the upstream
    transformers test file.
    """

    # All TF MobileBERT architectures exercised by the common model tests.
    lowerCamelCase__ = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task name -> model class mapping used by the pipeline tests.
    lowerCamelCase__ = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def snake_case__ ( self, __a, __a, __a=False):
        """Prepare inputs via the base mixin; when labels are requested, add a
        dummy zero label tensor for pretraining-mapped model classes.
        NOTE(review): `return_labels`, `model_class` and `inputs_dict` were left
        undefined by the identifier mangling — restore from upstream."""
        _lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(__a, __a, return_labels=__a)
        if return_labels:
            if model_class in get_values(__a):
                _lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
        return inputs_dict

    class UpperCAmelCase_ ( a):
        """Builds tiny MobileBERT configs/inputs and checks per-head output shapes."""

        def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
            """Store the hyper-parameters of the tiny test model.
            NOTE(review): parameters were mangled to duplicate `__a` names
            (invalid Python as written) — restore the real names from upstream."""
            _lowerCAmelCase : List[Any] = parent
            _lowerCAmelCase : Dict = batch_size
            _lowerCAmelCase : str = seq_length
            _lowerCAmelCase : int = is_training
            _lowerCAmelCase : List[Any] = use_input_mask
            _lowerCAmelCase : Optional[Any] = use_token_type_ids
            _lowerCAmelCase : Union[str, Any] = use_labels
            _lowerCAmelCase : int = vocab_size
            _lowerCAmelCase : int = hidden_size
            _lowerCAmelCase : Optional[int] = num_hidden_layers
            _lowerCAmelCase : Tuple = num_attention_heads
            _lowerCAmelCase : Dict = intermediate_size
            _lowerCAmelCase : Tuple = hidden_act
            _lowerCAmelCase : Any = hidden_dropout_prob
            _lowerCAmelCase : Any = attention_probs_dropout_prob
            _lowerCAmelCase : List[Any] = max_position_embeddings
            _lowerCAmelCase : Any = type_vocab_size
            _lowerCAmelCase : List[Any] = type_sequence_label_size
            _lowerCAmelCase : Union[str, Any] = initializer_range
            _lowerCAmelCase : List[str] = num_labels
            _lowerCAmelCase : List[Any] = num_choices
            _lowerCAmelCase : str = scope
            _lowerCAmelCase : Union[str, Any] = embedding_size

        def snake_case__ ( self):
            """Build random ids/masks/labels plus a tiny MobileBertConfig."""
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            _lowerCAmelCase : str = None
            if self.use_input_mask:
                _lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
            _lowerCAmelCase : List[str] = None
            if self.use_token_type_ids:
                _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            _lowerCAmelCase : Optional[Any] = None
            _lowerCAmelCase : Optional[Any] = None
            _lowerCAmelCase : Optional[int] = None
            if self.use_labels:
                _lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
                _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                _lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
            _lowerCAmelCase : Optional[Any] = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Base model: check last_hidden_state and pooler_output shapes for
            dict, list and ids-only call styles."""
            _lowerCAmelCase : str = TFMobileBertModel(config=__a)
            _lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : Any = model(__a)
            _lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
            _lowerCAmelCase : List[Any] = model(__a)
            _lowerCAmelCase : Any = model(__a)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Masked-LM head: logits shaped (batch, seq, vocab)."""
            _lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
            _lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : List[Any] = model(__a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Next-sentence head: logits shaped (batch, 2)."""
            _lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
            _lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : List[str] = model(__a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Pretraining head: MLM logits plus (batch, 2) seq-relationship logits."""
            _lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
            _lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : Optional[Any] = model(__a)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Sequence-classification head: logits shaped (batch, num_labels)."""
            _lowerCAmelCase : Dict = self.num_labels
            _lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
            _lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : Optional[Any] = model(__a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Multiple-choice head: inputs tiled per choice; logits (batch, num_choices)."""
            _lowerCAmelCase : Optional[Any] = self.num_choices
            _lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
            _lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
            _lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
            _lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
            _lowerCAmelCase : Optional[Any] = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            _lowerCAmelCase : List[str] = model(__a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Token-classification head: logits shaped (batch, seq, num_labels)."""
            _lowerCAmelCase : List[str] = self.num_labels
            _lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
            _lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : Union[str, Any] = model(__a)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
            """Question-answering head: start/end logits shaped (batch, seq)."""
            _lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
            _lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _lowerCAmelCase : Union[str, Any] = model(__a)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def snake_case__ ( self):
            """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
            _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
            (
                (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) , (
                    _lowerCAmelCase
                ) ,
            ) : Union[str, Any] = config_and_inputs
            _lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def snake_case__ ( self):
        """Set up the model tester and a ConfigTester for MobileBertConfig."""
        _lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
        _lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)

    def snake_case__ ( self):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def snake_case__ ( self):
        """Base model shape checks."""
        _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__a)

    def snake_case__ ( self):
        """Masked-LM head checks."""
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)

    def snake_case__ ( self):
        """Multiple-choice head checks."""
        _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)

    def snake_case__ ( self):
        """Next-sentence-prediction head checks."""
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)

    def snake_case__ ( self):
        """Pretraining head checks."""
        _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)

    def snake_case__ ( self):
        """Question-answering head checks."""
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)

    def snake_case__ ( self):
        """Sequence-classification head checks."""
        _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)

    def snake_case__ ( self):
        """Token-classification head checks."""
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)

    @slow
    def snake_case__ ( self):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        for model_name in ["google/mobilebert-uncased"]:
            _lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
            self.assertIsNotNone(__a)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
    """Slow integration test for the pretrained TFMobileBertForPreTraining model."""

    @slow
    def snake_case__ ( self):
        """Run google/mobilebert-uncased on a 6-token input and compare a
        3x3 slice of the prediction logits against stored reference values."""
        _lowerCAmelCase : str = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        _lowerCAmelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]])
        _lowerCAmelCase : Tuple = model(__a)[0]
        # Expected logits shape: (1, 6, 30522) — the MobileBERT vocab size.
        _lowerCAmelCase : Union[str, Any] = [1, 6, 3_0522]
        self.assertEqual(output.shape, __a)
        _lowerCAmelCase : Tuple = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ])
        # Element-wise comparison with absolute tolerance 1e-4.
        tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-4)
| 300
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch ops deterministic so the hard-coded image-slice values below are reproducible.
enable_full_determinism()
@skip_mps
class UpperCAmelCase_ ( a , a , unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline built from tiny
    components.

    NOTE(review): the two `a` bases are presumably PipelineLatentTesterMixin
    and PipelineTesterMixin (imported above, names mangled) — confirm.
    """

    lowerCamelCase__ = StableDiffusionPanoramaPipeline
    lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def snake_case__ ( self):
        """Build tiny seeded UNet/VAE/CLIP components for the pipeline."""
        torch.manual_seed(0)
        _lowerCAmelCase : Dict = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        _lowerCAmelCase : str = DDIMScheduler()
        torch.manual_seed(0)
        _lowerCAmelCase : int = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        _lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        _lowerCAmelCase : List[str] = CLIPTextModel(__a)
        _lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        _lowerCAmelCase : Optional[int] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def snake_case__ ( self, __a, __a=0):
        """Deterministic pipeline kwargs for a 1-step tiny generation."""
        _lowerCAmelCase : int = torch.manual_seed(__a)
        _lowerCAmelCase : Dict = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def snake_case__ ( self):
        """Default run: check output shape and a stored 3x3 reference slice."""
        _lowerCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Dict = self.get_dummy_components()
        _lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**__a)
        _lowerCAmelCase : str = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(__a)
        _lowerCAmelCase : Dict = sd_pipe(**__a).images
        _lowerCAmelCase : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Any = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def snake_case__ ( self):
        """Restrict the batch-consistency test to small batch sizes."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def snake_case__ ( self):
        """Single-vs-batched equivalence with a relaxed tolerance."""
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)

    def snake_case__ ( self):
        """Run with a negative prompt and compare the reference slice."""
        _lowerCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
        _lowerCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**__a)
        _lowerCAmelCase : Optional[Any] = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(__a)
        _lowerCAmelCase : Any = "french fries"
        _lowerCAmelCase : Union[str, Any] = sd_pipe(**__a, negative_prompt=__a)
        _lowerCAmelCase : Optional[int] = output.images
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def snake_case__ ( self):
        """Run with view_batch_size=2 (panorama views batched) and compare."""
        _lowerCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
        _lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**__a)
        _lowerCAmelCase : Union[str, Any] = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(__a)
        _lowerCAmelCase : Any = sd_pipe(**__a, view_batch_size=2)
        _lowerCAmelCase : Tuple = output.images
        _lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Union[str, Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def snake_case__ ( self):
        """Swap in EulerAncestralDiscreteScheduler and compare the slice."""
        _lowerCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Optional[int] = self.get_dummy_components()
        _lowerCAmelCase : Any = EulerAncestralDiscreteScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear")
        _lowerCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline(**__a)
        _lowerCAmelCase : Any = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : str = self.get_dummy_inputs(__a)
        _lowerCAmelCase : Union[str, Any] = sd_pipe(**__a).images
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Optional[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def snake_case__ ( self):
        """Swap in PNDMScheduler (skip PRK steps) and compare the slice."""
        _lowerCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Dict = self.get_dummy_components()
        _lowerCAmelCase : int = PNDMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=__a)
        _lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**__a)
        _lowerCAmelCase : Dict = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : int = self.get_dummy_inputs(__a)
        _lowerCAmelCase : str = sd_pipe(**__a).images
        _lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase : Optional[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self, __a=0):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(__a)
_lowerCAmelCase : Union[str, Any] = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "stabilityai/stable-diffusion-2-base"
_lowerCAmelCase : List[Any] = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : Dict = self.get_inputs()
_lowerCAmelCase : Union[str, Any] = pipe(**__a).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : int = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
])
assert np.abs(expected_slice - image_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base", safety_checker=__a)
_lowerCAmelCase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : Union[str, Any] = self.get_inputs()
_lowerCAmelCase : Optional[int] = pipe(**__a).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : Tuple = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = 0
def callback_fn(__a, __a, __a) -> None:
_lowerCAmelCase : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCAmelCase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : Dict = latents[0, -3:, -3:, -1]
_lowerCAmelCase : str = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
_lowerCAmelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : Any = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
_lowerCAmelCase : str = False
_lowerCAmelCase : List[Any] = "stabilityai/stable-diffusion-2-base"
_lowerCAmelCase : Any = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
_lowerCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
_lowerCAmelCase : Dict = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : List[str] = self.get_inputs()
pipe(**__a, callback=__a, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case__ ( self):
    '''Checks that sequential CPU offload plus attention slicing keeps peak
    CUDA memory below 5.5 GB for one pipeline run.

    NOTE(review): obfuscated like the rest of the file -- `pipe` and
    `mem_bytes` are read but every assignment targets `_lowerCAmelCase`, and
    `__a` is an undefined module-level name. Kept byte-identical; comments
    only.
    '''
    # reset CUDA memory accounting so max_memory_allocated reflects this run only
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    _lowerCAmelCase : str = "stabilityai/stable-diffusion-2-base"
    _lowerCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
    _lowerCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
    _lowerCAmelCase : Optional[int] = pipe.to(__a)
    pipe.set_progress_bar_config(disable=__a)
    pipe.enable_attention_slicing(1)
    pipe.enable_sequential_cpu_offload()
    _lowerCAmelCase : int = self.get_inputs()
    _lowerCAmelCase : int = pipe(**__a)
    _lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
    # make sure that less than 5.5 GB is allocated
    assert mem_bytes < 5.5 * 10**9
| 369
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_snake_case)
class UpperCAmelCase_(PretrainedConfig):
    """Composite configuration for RAG: wraps a question-encoder config and a
    generator config plus the retrieval hyper-parameters.

    Fixes to the garbled original: the ``__init__`` signature declared every
    parameter as ``__a`` (duplicate argument names are a SyntaxError) while the
    body read the real names; attribute assignments targeted the throwaway
    ``_lowerCAmelCase`` instead of ``self``; the decorator argument and base
    class were the undefined name ``a`` (``_snake_case`` holds the docstring
    and ``PretrainedConfig`` is imported above); the two class attributes were
    both called ``lowerCamelCase__`` although ``to_dict`` reads
    ``self.__class__.model_type``.
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        """Build the composite config; ``kwargs`` must carry the
        ``question_encoder`` and ``generator`` sub-config dicts."""
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        # Pop the sub-config dicts out of kwargs and rebuild them as real
        # config objects via AutoConfig (imported lazily to avoid a cycle).
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        # loss options
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        # retrieval formatting
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        # retrieval dataset / index
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        # generation / output options
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            # fall back to the generator's forced EOS token if any
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def snake_case__(cls, __a, __a_generator=None, **kwargs):
        """Build a RAG config from a question-encoder config and a generator
        config (original name: ``from_question_encoder_generator_configs``)."""
        question_encoder_config, generator_config = __a, __a_generator
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 300
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class UpperCAmelCase_(unittest.TestCase):
    """Tatoeba conversion tests.

    Fixes to the garbled original: the skip condition checked the undefined
    name ``a`` (``DEFAULT_REPO`` is imported above); all three methods shared
    the name ``snake_case__`` so the later definitions shadowed the earlier
    ones, and ``self.resolver`` (read by both tests) was never defined;
    results were bound to ``_lowerCAmelCase`` and undefined ``__a`` arguments
    were passed.
    """

    @cached_property
    def resolver(self):
        """Converter writing into a throw-away temp directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_translation(self):
        """Conversion of a single language pair must run end to end."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """Dry-run model card generation must report the right long pair."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 370
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_(ABC):
    """Abstract base class for CLI subcommands.

    Fixes to the garbled original: the base class was the undefined name ``a``
    (``ABC`` is imported above), and both abstract methods were named
    ``snake_case__`` so the second definition silently replaced the first.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on *parser*."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 300
| 0
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
    '''DeBERTa-v2 slow/fast tokenizer parity tests on a SentencePiece fixture.

    NOTE(review): this file looks machine-obfuscated -- every result is bound
    to the throwaway name `_lowerCAmelCase` and then read back under its
    original name (`tokenizer`, `rust_tokenizer`, `vocab_keys`, ...), many
    call sites pass the undefined module-level name `__a` (presumably the
    fixture path `_snake_case` or boolean flags), and the base class `a` is
    undefined (presumably TokenizerTesterMixin). As written it cannot run;
    kept byte-identical with comments only.
    '''
    lowerCamelCase__ = DebertaVaTokenizer
    lowerCamelCase__ = DebertaVaTokenizerFast
    lowerCamelCase__ = True
    lowerCamelCase__ = True

    def snake_case__ ( self):
        '''Save the fixture tokenizer into the temp dir for later reloads.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        _lowerCAmelCase : Dict = DebertaVaTokenizer(__a, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def snake_case__ ( self, __a):
        '''Return an (input_text, output_text) pair for round-trip checks.'''
        _lowerCAmelCase : Tuple = "this is a test"
        _lowerCAmelCase : Optional[int] = "this is a test"
        return input_text, output_text

    def snake_case__ ( self):
        '''Token<->id conversion: "<pad>" maps to id 0 and back.'''
        _lowerCAmelCase : Optional[int] = "<pad>"
        _lowerCAmelCase : Optional[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)

    def snake_case__ ( self):
        '''First/last vocab entries and total vocab length (30001).'''
        _lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(__a), 3_0001)

    def snake_case__ ( self):
        '''vocab_size property of the fixture tokenizer.'''
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)

    def snake_case__ ( self):
        '''Lower-casing: slow and fast tokenizers agree on a mixed-case input.'''
        _lowerCAmelCase : Optional[Any] = " \tHeLLo!how  \n Are yoU?  "
        _lowerCAmelCase : Any = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        _lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(__a, do_lower_case=__a)
        _lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : List[str] = DebertaVaTokenizerFast(__a, do_lower_case=__a)
        _lowerCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def snake_case__ ( self):
        '''Skipped: known slow/fast inconsistency.'''
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def snake_case__ ( self):
        '''Skipped: known slow/fast inconsistency.'''
        pass

    def snake_case__ ( self):
        '''split_by_punct: slow and fast agree on accented input.'''
        _lowerCAmelCase : Optional[int] = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : List[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        _lowerCAmelCase : str = DebertaVaTokenizer(__a, split_by_punct=__a)
        _lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[Any] = DebertaVaTokenizerFast(__a, split_by_punct=__a)
        _lowerCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''do_lower_case + split_by_punct combination.'''
        _lowerCAmelCase : Optional[int] = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        _lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Union[str, Any] = DebertaVaTokenizerFast(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''Another do_lower_case / split_by_punct flag combination.'''
        _lowerCAmelCase : Optional[Any] = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        _lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Tuple = DebertaVaTokenizerFast(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''Yet another flag combination over the accented sentence.'''
        _lowerCAmelCase : Optional[int] = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : Tuple = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on
        _lowerCAmelCase : Optional[int] = DebertaVaTokenizer(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = DebertaVaTokenizerFast(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''Whitespace-heavy mixed-case input with split_by_punct.'''
        _lowerCAmelCase : str = " \tHeLLo!how  \n Are yoU?  "
        _lowerCAmelCase : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        _lowerCAmelCase : List[Any] = DebertaVaTokenizer(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : List[Any] = DebertaVaTokenizerFast(__a, do_lower_case=__a, split_by_punct=__a)
        _lowerCAmelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''Slow and fast tokenizers produce identical tokens and ids.'''
        _lowerCAmelCase : Optional[int] = self.get_tokenizer()
        _lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
        _lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a, add_special_tokens=__a))
        _lowerCAmelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a, add_special_tokens=__a))
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : str = tokenizer.encode(__a, add_special_tokens=__a)
        _lowerCAmelCase : Tuple = rust_tokenizer.encode(__a, add_special_tokens=__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
        _lowerCAmelCase : Any = tokenizer.encode(__a)
        _lowerCAmelCase : Tuple = rust_tokenizer.encode(__a)
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''Full tokenizer check: ids, tokens and back-mapping, with and
        without accents kept, for both slow and fast tokenizers.'''
        _lowerCAmelCase : List[str] = "This is a test"
        _lowerCAmelCase : Dict = [13, 1, 4398, 25, 21, 1289]
        _lowerCAmelCase : str = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        _lowerCAmelCase : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        _lowerCAmelCase : Tuple = DebertaVaTokenizer(__a, keep_accents=__a)
        _lowerCAmelCase : Dict = DebertaVaTokenizerFast(__a, keep_accents=__a)
        _lowerCAmelCase : Optional[Any] = tokenizer.encode(__a, add_special_tokens=__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Any = tokenizer.tokenize(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : int = rust_tokenizer.encode(__a, add_special_tokens=__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = rust_tokenizer.tokenize(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(__a)
        self.assertListEqual(__a, __a)
        # fmt: off
        _lowerCAmelCase : Optional[Any] = "I was born in 92000, and this is falsé."
        _lowerCAmelCase : int = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        _lowerCAmelCase : Optional[Any] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        _lowerCAmelCase : List[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        _lowerCAmelCase : Union[str, Any] = tokenizer.encode(__a, add_special_tokens=__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Tuple = tokenizer.tokenize(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = rust_tokenizer.encode(__a, add_special_tokens=__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(__a)
        self.assertListEqual(__a, __a)
        _lowerCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(__a)
        self.assertListEqual(__a, __a)

    def snake_case__ ( self):
        '''build_inputs_with_special_tokens adds [CLS]/[SEP] around one or two sequences.'''
        _lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(__a)
        _lowerCAmelCase : List[Any] = tokenizer.encode("sequence builders")
        _lowerCAmelCase : str = tokenizer.encode("multi-sequence build")
        _lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a)
        _lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a, __a)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], __a)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], __a, )

    @slow
    def snake_case__ ( self):
        '''Integration test against the released microsoft/deberta-v2-xlarge checkpoint.'''
        # fmt: off  -- recorded expected encoding for three example sentences
        _lowerCAmelCase : Any = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
| 371
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping a timm ViT state dict onto the
    HF ViT layout.

    Fixes to the garbled original: the signature declared two parameters both
    named ``_lowerCamelCase`` (a SyntaxError) and the list was bound to
    ``_lowerCAmelCase`` while the body appended to the unbound name
    ``rename_keys``. The function is restored under the name the call site
    below (``create_rename_keys(...)``) already uses.

    Args:
        config: HF ViT config; only ``num_hidden_layers`` is read.
        base_model: if True, map onto a bare ``ViTModel`` (no ``vit.`` prefix,
            pooler instead of classification head).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])

    return rename_keys


# Backward-compatible alias for the obfuscated name this function carried.
A = create_rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value
    weights, mutating *state_dict* in place.

    Fixes to the garbled original: the signature declared three parameters all
    named ``_lowerCamelCase`` (a SyntaxError) and the sliced tensors were bound
    to ``_lowerCAmelCase`` instead of being written back into the state dict.
    Restored under the name the call site below (``read_in_q_k_v(...)``) uses.
    """
    for i in range(config.num_hidden_layers):
        # no "vit." prefix when converting to a bare ViTModel
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# Backward-compatible alias for the obfuscated name this function carried.
A = read_in_q_k_v
def remove_classification_head_(state_dict):
    """Drop timm's classification head from *state_dict* in place (used when
    converting to a bare base model).

    Fixes to the garbled original: ``ignore_keys`` was bound to
    ``_lowerCAmelCase`` and then read under its real name, and ``pop`` was
    called with the same garbled name for both arguments. Restored under the
    name the call site below (``remove_classification_head_(...)``) uses.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # default of None: silently skip keys that are already absent
        state_dict.pop(k, None)


# Backward-compatible alias for the obfuscated name this function carried.
A = remove_classification_head_
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes to the garbled original: three parameters all named
    ``_lowerCamelCase`` (a SyntaxError) and the popped value bound to a dead
    name. Restored under the name the call site below (``rename_key(...)``)
    uses.
    """
    val = dct.pop(old)
    dct[new] = val


# Backward-compatible alias for the obfuscated name this function carried.
A = rename_key
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch).

    Fixes to the garbled original: the URL was bound to ``_lowerCAmelCase``
    while ``requests.get`` received the undefined name ``_lowerCamelCase`` for
    both the URL and ``stream``. Restored under the name the call site below
    (``prepare_img()``) uses; ``stream=True`` so ``.raw`` is readable by PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Backward-compatible alias for the obfuscated name this function carried.
A = prepare_img
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Convert a timm ViT/DeiT checkpoint named *vit_name* to the HF format,
    verify it against the timm model on one image, and save it to
    *pytorch_dump_folder_path*.

    Fixes to the garbled original: two parameters both named
    ``_lowerCamelCase`` (a SyntaxError) and every config/model/tensor bound to
    the dead name ``_lowerCAmelCase`` while later statements read the real
    names (``config``, ``base_model``, ``model``, ...). Restored under the
    name the ``__main__`` block below already calls.
    """
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture (base sizes are the ViTConfig defaults)
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias for the obfuscated name this function carried.
A = convert_vit_checkpoint
if __name__ == "__main__":
    # CLI entry point: parse the timm model name and the output directory.
    # NOTE(review): obfuscation broke this block -- the parser and the parsed
    # namespace are both bound to `_snake_case` while `parser.add_argument`
    # and `args.vit_name` read the original names. Kept byte-identical.
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    _snake_case = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 0
|
import re
from filelock import FileLock
# Detect whether nltk is installed; if so, fetch the punkt sentence model once
# (FileLock guards against concurrent downloads from multiple processes).
# Fix to the garbled original: the availability flag was assigned to
# `_snake_case` but read as `NLTK_AVAILABLE` here and in the function below.
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

# Keep the obfuscated alias for any code that still reads it.
_snake_case = NLTK_AVAILABLE

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def A(_lowerCamelCase):
    """Return the input text split into sentences, one per line, using nltk.

    Fix: the result of ``re.sub`` was discarded, so the pegasus newline
    marker ``<n>`` was never actually removed before sentence-splitting;
    the cleaned text is now used.
    """
    _lowerCamelCase = re.sub("<n>", "", _lowerCamelCase)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_lowerCamelCase))
| 350
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_(VideoMAEImageProcessor):
    """Deprecated alias for :class:`VideoMAEImageProcessor`.

    Fixes to the garbled original: ``__init__`` declared ``*__a, **__a``
    (duplicate argument names are a SyntaxError), the warning category was the
    undefined ``__a``, and the base class was the undefined name ``a`` while
    ``VideoMAEImageProcessor`` is imported above.
    """

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning, then defer entirely to the
        # new image-processor implementation.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
| 0
|
class Node:
    """Doubly-linked-list node holding *data* plus previous/next references.

    Fixes to the garbled original: ``__init__`` declared three parameters all
    named ``__a`` (a SyntaxError) and bound the values to the dead name
    ``_lowerCAmelCase``, so ``self.data``/``self.previous``/``self.next``
    (read by the methods below and by ``Node(...)`` in the list code) were
    never set; the three accessors all shared the name ``snake_case__`` and
    shadowed each other — restored to the names the rest of the file calls.
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        """Return the stored value."""
        return self.data

    def get_next(self):
        """Return the next node (or None at the tail)."""
        return self.next

    def get_previous(self):
        """Return the previous node (or None at the head)."""
        return self.previous


# Backward-compatible alias for the obfuscated name this class carried.
UpperCAmelCase_ = Node
class LinkedListIterator:
    """Forward iterator over node data, starting at *head*.

    Fixes to the garbled original: ``__init__`` bound the head to the dead
    name ``_lowerCAmelCase`` so ``self.current`` was never set, and the step
    method was named ``snake_case__`` although ``__iter__`` returns ``self``
    and therefore the iterator protocol requires ``__next__``. Restored to
    the name ``LinkedListIterator`` that ``LinkedList.__iter__`` calls.
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        # yield the current node's data, then advance
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value


# Backward-compatible alias for the obfuscated name this class carried.
UpperCAmelCase_ = LinkedListIterator
class UpperCAmelCase_ :
def __init__(self):
    """Create an empty list (fix: the original bound both sentinels to the
    dead name `_lowerCAmelCase`, so self.head/self.tail were never set)."""
    self.head = None  # First node in list
    self.tail = None  # Last node in list
def __str__(self):
    """Space-separated node data from head to tail (fix: the cursor and the
    collected list were bound to `_lowerCAmelCase` while the loop read the
    unbound names `current`/`nodes`; join argument restored to `node`)."""
    current = self.head
    nodes = []
    while current is not None:
        nodes.append(current.get_data())
        current = current.get_next()
    return " ".join(str(node) for node in nodes)
def __contains__(self, value):
    """True if any node's data equals *value* (fix: cursor was bound to
    `_lowerCAmelCase` while the loop read the unbound name `current`)."""
    current = self.head
    while current:
        if current.get_data() == value:
            return True
        current = current.get_next()
    return False
def __iter__( self):
    '''Iterate node data from head to tail via LinkedListIterator.'''
    return LinkedListIterator(self.head)
def snake_case__ ( self):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def snake_case__ ( self):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def snake_case__ ( self, __a):
'''simple docstring'''
if self.head is None:
_lowerCAmelCase : List[Any] = node
_lowerCAmelCase : List[str] = node
else:
self.insert_before_node(self.head, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if self.head is None:
self.set_head(__a)
else:
self.insert_after_node(self.tail, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Node(__a)
if self.head is None:
self.set_head(__a)
else:
self.set_tail(__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = node
_lowerCAmelCase : str = node.previous
if node.get_previous() is None:
_lowerCAmelCase : Tuple = node_to_insert
else:
_lowerCAmelCase : str = node_to_insert
_lowerCAmelCase : Optional[Any] = node_to_insert
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = node
_lowerCAmelCase : int = node.next
if node.get_next() is None:
_lowerCAmelCase : Optional[int] = node_to_insert
else:
_lowerCAmelCase : List[str] = node_to_insert
_lowerCAmelCase : Tuple = node_to_insert
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[str] = Node(__a)
_lowerCAmelCase : Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(__a, __a)
return
current_position += 1
_lowerCAmelCase : Any = node.next
self.insert_after_node(self.tail, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.head
while node:
if node.get_data() == item:
return node
_lowerCAmelCase : Any = node.get_next()
raise Exception("Node not found")
def snake_case__ ( self, __a):
'''simple docstring'''
if (node := self.get_node(__a)) is not None:
if node == self.head:
_lowerCAmelCase : Optional[int] = self.head.get_next()
if node == self.tail:
_lowerCAmelCase : Optional[int] = self.tail.get_previous()
self.remove_node_pointers(__a)
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
if node.get_next():
_lowerCAmelCase : Union[str, Any] = node.previous
if node.get_previous():
_lowerCAmelCase : Union[str, Any] = node.next
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[str] = None
def snake_case__ ( self):
'''simple docstring'''
return self.head is None
def A ( ):
    """Placeholder with no executable body.

    NOTE(review): only the docstring survives in this copy of the file; the
    original presumably built a demonstration LinkedList for the doctest
    runner below — confirm against the upstream source before relying on it.
    """
if __name__ == "__main__":
    # Execute any doctests embedded in this module when run as a script.
    import doctest
    doctest.testmod()
| 351
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Nightly GPU test for the legacy ONNX Stable Diffusion in-paint pipeline."""

    @property
    def gpu_provider(self):
        # ONNX Runtime execution-provider spec: CUDA with a capped arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # Disable memory-pattern optimisation for deterministic allocation.
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        # NOTE(review): the original test-method name is not recoverable from
        # this copy; any pytest-discoverable name works.
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 300
| 0
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

# Native torch AMP (autocast) exists from torch 1.6 onwards.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

# Must be bound to the name `logger`: configure_logger() below calls
# logger.setLevel() on it (it was previously bound to a throwaway name,
# leaving `logger` undefined at call time).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """CLI arguments selecting and configuring the model to pretrain.

    Field names restored from their use sites (`model_args.verbose_logging`,
    `model_args.model_name_or_path`, the gumbel-temperature reads in `main`);
    the original copy gave all seven fields the same name, so only the last
    one survived.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): name and default inferred from the help text — confirm.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    """Set root logging format and choose this module's log level.

    DEBUG when verbose logging was requested, INFO on the distributed main
    process, WARNING otherwise. Name restored from the call site in main().
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """CLI arguments describing which dataset to load and how to preprocess it.

    Field names restored from their use sites in main()
    (`data_args.dataset_name`, `data_args.train_split_name`, etc.).
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Collate raw speech features into padded batches with sampled mask indices.

    Name restored from the call site in main(). The obfuscated copy assigned
    every intermediate to a throwaway local and never stored the computed
    attention mask or mask indices back into the batch; restored from the
    upstream wav2vec2 pretraining script.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        """Pad *features* and add `mask_time_indices` for the pretraining loss."""
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        attention_mask = None
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer that decays the model's Gumbel-softmax temperature every update.

    Name and constructor keywords restored from the call site in main(); the
    obfuscated copy declared duplicate parameter names (a SyntaxError) and
    assigned its attributes to throwaway locals.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """One optimisation step: forward, loss reduction/scaling, backward,
        then Gumbel temperature decay. Returns the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            # model is wrapped (DataParallel/deepspeed), so config lives on .module
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    """Parse CLI arguments, build the audio dataset pipeline and launch
    wav2vec2 pretraining. Name restored from the __main__ guard below."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain — carve the
        # validation split out of the front of the train split.
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        # NOTE(review): the obfuscated copy dropped the assignment into the
        # batch; restored per the upstream pretraining script.
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Tests that `CLIPSegProcessor` round-trips its tokenizer and image processor.

    Method names restored from their `self.` call sites; the obfuscated copy
    also referenced an undefined `__a` in setUp and used the non-existent
    `np.uinta` dtype.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 300
| 0
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

# Must be bound to the name `logger`: the quantization helpers below log
# through it (it was previously bound to a throwaway name, leaving `logger`
# undefined at call time).
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """Quantize *model* with bitsandbytes and optionally load + dispatch weights.

    Restored from the upstream accelerate implementation: the obfuscated copy
    declared eight identically-named parameters (a SyntaxError) and assigned
    the peft-compatibility flags and quantized model to throwaway locals.

    NOTE(review): in this copy the distinct 4-bit/8-bit config attributes and
    availability checks were collapsed to a single `abit` spelling — confirm
    the real names against upstream accelerate.

    Returns:
        The quantized (and, for meta-device models, dispatched) model.

    Raises:
        ImportError/ValueError: incompatible `bitsandbytes` version.
        RuntimeError: no GPU available, or no `weights_location` for a
            meta-device model.
    """
    load_in_8bit = bnb_quantization_config.load_in_abit  # NOTE(review): 8-bit flag — name collapsed
    load_in_4bit = bnb_quantization_config.load_in_abit  # NOTE(review): 4-bit flag — name collapsed

    if load_in_8bit and not is_abit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_abit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules,
            offload_abit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    """Resolve a device map for the quantized model.

    Name restored from the call site in load_and_quantize_model; the kwargs
    dict feeding `infer_auto_device_map` had been destroyed by the
    obfuscation's local-variable collapse.
    """
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        # Keep the skipped / full-precision modules out of the quantized dtype.
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace all eligible `nn.Linear` modules with bitsandbytes layers.

    Name restored from its call sites in load_and_quantize_model. Emits a
    warning when nothing was replaced (e.g. Conv1D-based architectures).
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Recursively swap `nn.Linear` children for bitsandbytes layers.

    Returns ``(model, has_been_replaced)``. Restored from upstream accelerate:
    the obfuscated copy dropped the weight/bias copies and the replaced flag
    into throwaway locals and passed the model to `requires_grad_`.

    NOTE(review): the 8-bit/4-bit config attributes are collapsed to the same
    `load_in_abit` name in this copy, so the elif branch is unreachable —
    confirm the distinct names upstream.
    """
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.LinearabitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fpaa_weights=False,
                        threshold=bnb_quantization_config.llm_inta_threshold,
                    )
                elif bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.Linearabit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_abit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_abit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def A ( _lowerCamelCase ):
    """Compute the list of module names that should NOT be converted to bnb layers.

    NOTE(review): mangled block. Dataflow is broken throughout — e.g.
    ``set(_lowerCamelCase) - set(_lowerCamelCase)`` is always empty, the
    ``deepcopy`` result is discarded, and names like ``tied_params``,
    ``model``, ``list_modules``, ``list_untouched`` are read but never bound.
    Upstream this mirrors accelerate's ``get_keys_to_not_convert``; verify
    against that before relying on the documented intent below.
    """
    with init_empty_weights():
        _lowerCAmelCase : str = deepcopy(_lowerCamelCase )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    _lowerCAmelCase : Union[str, Any] = find_tied_parameters(_lowerCamelCase )
    # For compatibility with Accelerate < 0.18
    if isinstance(_lowerCamelCase , _lowerCamelCase ):
        _lowerCAmelCase : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        _lowerCAmelCase : Any = sum(_lowerCamelCase , [] )
    _lowerCAmelCase : Tuple = len(_lowerCamelCase ) > 0
    # Check if it is a base model
    _lowerCAmelCase : Any = False
    if hasattr(_lowerCamelCase , "base_model_prefix" ):
        _lowerCAmelCase : List[str] = not hasattr(_lowerCamelCase , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    _lowerCAmelCase : Any = list(model.named_children() )
    _lowerCAmelCase : Dict = [list_modules[-1][0]]
    # add last module together with tied weights
    _lowerCAmelCase : Dict = set(_lowerCamelCase ) - set(_lowerCamelCase )
    _lowerCAmelCase : Optional[int] = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase )
    # remove ".weight" from the keys
    _lowerCAmelCase : Any = [".weight", ".bias"]
    _lowerCAmelCase : Optional[int] = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                # NOTE(review): the stripped name is bound to a throwaway
                # local; the unmodified `name` is what gets appended below.
                _lowerCAmelCase : Any = name.replace(_lowerCamelCase , "" )
        filtered_module_names.append(_lowerCamelCase )
    return filtered_module_names
def A ( _lowerCamelCase ):
    """Return True if any submodule of the given model is a bnb ``Linearabit`` layer.

    BUG FIX: the body iterated an undefined global ``model`` instead of the
    ``_lowerCamelCase`` parameter — the parameter was unused and every call
    raised ``NameError``.

    Parameters
    ----------
    _lowerCamelCase : torch.nn.Module
        The model whose submodules are inspected.

    Returns
    -------
    bool
        True as soon as one quantized linear layer is found, else False.
    """
    for m in _lowerCamelCase.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def A ( _lowerCamelCase ):
    """Return the device of the first parameter of the given module.

    BUG FIX: the body read an undefined global ``parameter`` instead of the
    ``_lowerCamelCase`` argument, raising ``NameError`` on every call.

    Parameters
    ----------
    _lowerCamelCase : torch.nn.Module
        Module with at least one parameter (raises StopIteration otherwise).

    Returns
    -------
    torch.device
        Device of the module's first parameter.
    """
    return next(_lowerCamelCase.parameters() ).device
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    """Quantize a named tensor on the model and offload it (weights + SCB stats) to disk.

    NOTE(review): mangled block. The signature repeats ``_lowerCamelCase``
    seven times (SyntaxError: duplicate argument names), and the body reads
    names that are never bound (``fpaa_statistics``, ``param_name``, ``model``,
    ``tensor_name``, ``splits``, ``param``). Upstream this mirrors accelerate's
    bnb ``quantize_and_offload`` helper — confirm there before trusting the
    described behavior.
    """
    if fpaa_statistics is None:
        # Move the tensor onto device 0 so bnb quantizes it on assignment.
        set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , 0 , dtype=_lowerCamelCase , value=_lowerCamelCase )
    _lowerCAmelCase : Dict = param_name
    _lowerCAmelCase : Optional[Any] = model
    if "." in tensor_name:
        # Walk the dotted path down to the module that owns the tensor.
        _lowerCAmelCase : List[Any] = tensor_name.split("." )
        for split in splits[:-1]:
            _lowerCAmelCase : Any = getattr(_lowerCamelCase , _lowerCamelCase )
            if new_module is None:
                raise ValueError(F"{module} has no attribute {split}." )
            _lowerCAmelCase : List[Any] = new_module
        _lowerCAmelCase : Union[str, Any] = splits[-1]
    # offload weights
    _lowerCAmelCase : List[Any] = False
    offload_weight(module._parameters[tensor_name] , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
    if hasattr(module._parameters[tensor_name] , "SCB" ):
        # 8-bit params carry their quantization statistics in `.SCB`; offload
        # them alongside under a "...SCB" key.
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase , )
    else:
        offload_weight(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index=_lowerCamelCase )
        offload_weight(_lowerCamelCase , param_name.replace("weight" , "SCB" ) , _lowerCamelCase , index=_lowerCamelCase )
    # Free the on-device copy: park an empty meta tensor in its place.
    set_module_tensor_to_device(_lowerCamelCase , _lowerCamelCase , "meta" , dtype=_lowerCamelCase , value=torch.empty(*param.size() ) )
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
    """Pipeline tests for zero-shot text classification.

    NOTE(review): mangled test class. Every method is named ``snake_case__``
    (later defs shadow earlier ones), several methods duplicate the ``__a``
    parameter name (SyntaxError), class attributes were all renamed to
    ``lowerCamelCase__`` (the second binding overwrites the first), and the
    bodies read names that are never bound (``model_mapping``,
    ``tf_model_mapping``, ``classifier``, ``outputs``, ``zero_shot_classifier``).
    Upstream this mirrors transformers' ``test_pipelines_zero_shot.py``.
    """

    lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Filter out layout models, whose inputs differ from plain text models.
    if model_mapping is not None:
        lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        lowerCamelCase__ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def snake_case__ ( self, __a, __a, __a):
        """Build a zero-shot pipeline and example inputs for the shared pipeline test harness."""
        _lowerCAmelCase : str = ZeroShotClassificationPipeline(
            model=__a, tokenizer=__a, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def snake_case__ ( self, __a, __a):
        """Exercise the pipeline's input forms (str/list labels, templates, batches) and error cases."""
        _lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        # No kwarg
        _lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        _lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        _lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            __a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
        # Scores over candidate labels should form a probability distribution.
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        _lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            __a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        _lowerCAmelCase : List[Any] = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
        # https://github.com/huggingface/transformers/issues/13846
        _lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            __a, [
                {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
                for i in range(1)
            ], )
        _lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            __a, [
                {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
                for i in range(2)
            ], )
        # Invalid inputs must raise: empty sequence, non-str sequence, empty or
        # missing labels, and hypothesis templates without a "{}" placeholder.
        with self.assertRaises(__a):
            classifier("", candidate_labels="politics")
        with self.assertRaises(__a):
            classifier(__a, candidate_labels="politics")
        with self.assertRaises(__a):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(__a):
            classifier("Who are you voting for in 2020?", candidate_labels=__a)
        with self.assertRaises(__a):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(__a):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
        self.run_entailment_id(__a)

    def snake_case__ ( self, __a):
        """Check entailment-label resolution for several id2label layouts (default, MNLI-style, custom)."""
        _lowerCAmelCase : Tuple = zero_shot_classifier.model.config
        _lowerCAmelCase : Optional[Any] = config.labelaid
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
        # Generic LABEL_* names: no entailment label can be found -> -1.
        _lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        _lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        _lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        _lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        _lowerCAmelCase : List[str] = original_labelaid
        self.assertEqual(__a, zero_shot_classifier.entailment_id)

    @require_torch
    def snake_case__ ( self):
        """Regression test: very long inputs must be truncated, not crash (PyTorch backend)."""
        _lowerCAmelCase : Tuple = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])

    @require_torch
    def snake_case__ ( self):
        """Smoke test with a tiny random model (PyTorch): uniform scores expected."""
        _lowerCAmelCase : int = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        _lowerCAmelCase : List[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )

    @require_tf
    def snake_case__ ( self):
        """Smoke test with a tiny random model (TensorFlow): uniform scores expected."""
        _lowerCAmelCase : List[str] = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )

    @slow
    @require_torch
    def snake_case__ ( self):
        """Full-model test (roberta-large-mnli, PyTorch), incl. multi_label on a long abstract."""
        _lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        _lowerCAmelCase : Optional[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        _lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )

    @slow
    @require_tf
    def snake_case__ ( self):
        """Full-model test (roberta-large-mnli, TensorFlow), incl. multi_label on a long abstract."""
        _lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        _lowerCAmelCase : Dict = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        _lowerCAmelCase : str = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
        self.assertEqual(
            nested_simplify(__a), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
| 300
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
# BUG FIX: methods below call `logger.warning(...)`, but the logger was only
# bound to `_snake_case`. Alias it under the name actually used.
logger = _snake_case
_snake_case = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
# BUG FIX: the scheduler lookup in `_get_lr_scheduler` indexes
# `arg_to_scheduler[...]`, which was otherwise undefined (NameError).
arg_to_scheduler = _snake_case
class UpperCAmelCase_ ( a):
    """Seq2Seq trainer specialization of a HF ``Trainer``-like base class.

    NOTE(review): mangled class. Every method is named ``snake_case__`` (later
    defs shadow earlier ones, so only the last survives at runtime), several
    signatures duplicate the ``__a`` parameter (SyntaxError), and results are
    bound to throwaway ``_lowerCAmelCase`` locals while later lines read names
    (``config``, ``data_args``, ``loss``, ``logits``, ``scheduler``,
    ``optimizer_cls``, ``optimizer_kwargs``, ``pad_token_id``, ``tensor``,
    ``max_length``, ``padded_tensor``) that are never bound. Upstream this
    mirrors the FSMT/seq2seq ``Seq2SeqTrainer`` from transformers examples.
    """

    def __init__( self, __a=None, __a=None, *__a, **__a):
        """Store config/data_args, validate pad-token setup, and pick the loss function."""
        super().__init__(*__a, **__a)
        if config is None:
            assert isinstance(self.model, __a), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            _lowerCAmelCase : Tuple = self.model.config
        else:
            _lowerCAmelCase : List[Any] = config
        _lowerCAmelCase : Dict = data_args
        _lowerCAmelCase : List[str] = self.config.tgt_vocab_size if isinstance(self.config, __a) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            # Both ignore-pad and label smoothing need a pad id to mask with.
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")
        if self.args.label_smoothing == 0:
            _lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            _lowerCAmelCase : str = label_smoothed_nll_loss

    def snake_case__ ( self, __a):
        """Create the optimizer (Adafactor or AdamW, optionally OSS-sharded) and the LR scheduler."""
        if self.optimizer is None:
            # No weight decay on biases and LayerNorm weights.
            _lowerCAmelCase : Optional[Any] = ["bias", "LayerNorm.weight"]
            _lowerCAmelCase : int = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            _lowerCAmelCase : Optional[int] = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                _lowerCAmelCase : List[Any] = Adafactor
                _lowerCAmelCase : List[str] = {"scale_parameter": False, "relative_step": False}
            else:
                _lowerCAmelCase : str = AdamW
                _lowerCAmelCase : int = {
                    "betas": (self.args.adam_betaa, self.args.adam_betaa),
                    "eps": self.args.adam_epsilon,
                }
            _lowerCAmelCase : Any = self.args.learning_rate
            if self.sharded_ddp:
                # fairscale sharded optimizer wrapper.
                _lowerCAmelCase : Dict = OSS(
                    params=__a, optim=__a, **__a, )
            else:
                _lowerCAmelCase : Union[str, Any] = optimizer_cls(__a, **__a)
        if self.lr_scheduler is None:
            _lowerCAmelCase : Dict = self._get_lr_scheduler(__a)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def snake_case__ ( self, __a):
        """Build the LR scheduler named by ``--lr_scheduler`` from the `arg_to_scheduler` table."""
        _lowerCAmelCase : Any = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            _lowerCAmelCase : List[Any] = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            _lowerCAmelCase : List[Any] = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            _lowerCAmelCase : Any = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=__a)
        return scheduler

    def snake_case__ ( self):
        """Pick a train sampler: None for iterable datasets, TPU sampler on TPU, else random/distributed."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def snake_case__ ( self, __a, __a, __a):
        """Compute (loss, logits): plain CE, model-internal loss, or label-smoothed NLL."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                _lowerCAmelCase : List[Any] = model(**__a, use_cache=__a)[0]
                _lowerCAmelCase : Tuple = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                _lowerCAmelCase : Optional[Any] = model(**__a, labels=__a, use_cache=__a)[:2]
        else:
            # compute label smoothed loss
            _lowerCAmelCase : str = model(**__a, use_cache=__a)[0]
            _lowerCAmelCase : Any = torch.nn.functional.log_softmax(__a, dim=-1)
            _lowerCAmelCase : Union[str, Any] = self.loss_fn(__a, __a, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def snake_case__ ( self, __a, __a):
        """Trainer hook: pop labels from the inputs and return only the loss."""
        _lowerCAmelCase : str = inputs.pop("labels")
        # NOTE(review): calls `self._compute_loss`, which does not exist under
        # the mangled method names — upstream this is the method above.
        _lowerCAmelCase : Optional[Any] = self._compute_loss(__a, __a, __a)
        return loss

    def snake_case__ ( self, __a, __a, __a, __a = None, ):
        """Evaluation step: optionally generate, pad to max_length, and compute the loss."""
        _lowerCAmelCase : Any = self._prepare_inputs(__a)
        _lowerCAmelCase : str = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            _lowerCAmelCase : Any = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **__a, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                _lowerCAmelCase : Dict = self._pad_tensors_to_max_len(__a, gen_kwargs["max_length"])
        _lowerCAmelCase : Union[str, Any] = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            _lowerCAmelCase : Any = self._compute_loss(__a, __a, __a)
        _lowerCAmelCase : str = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        _lowerCAmelCase : Tuple = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            _lowerCAmelCase : Union[str, Any] = self._pad_tensors_to_max_len(__a, gen_kwargs["max_length"])
        return (loss, logits, labels)

    def snake_case__ ( self, __a, __a):
        """Right-pad a token tensor to max_length with pad (or eos) token id."""
        _lowerCAmelCase : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")
        _lowerCAmelCase : Dict = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        _lowerCAmelCase : int = tensor
        return padded_tensor
| 354
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
    """Output container for the SDE-VE scheduler's predictor step.

    NOTE(review): mangled — both fields were renamed to ``lowerCamelCase__``
    (the second assignment overwrites the first) and ``42`` stands where type
    annotations once were. Upstream this holds ``prev_sample`` and
    ``prev_sample_mean`` tensors; see the ``SdeVeOutput(...)`` construction in
    the scheduler's step method below.
    """

    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
    """Variance-exploding (VE) SDE scheduler (score_sde, Song et al.).

    NOTE(review): mangled class. All methods share the name ``snake_case__``
    (later defs shadow earlier ones), ``__init__`` duplicates the ``__a``
    parameter name (SyntaxError), and many statements bind throwaway
    ``_lowerCAmelCase`` locals while later lines read names never bound in
    this text (``sample``, ``sigma``, ``drift``, ``diffusion``, ``noise``,
    ``step_size``, ``timesteps``...). Upstream this mirrors diffusers'
    ``ScoreSdeVeScheduler``.
    """

    # compatible-scheduler order marker (mangled attribute name)
    lowerCamelCase__ = 1

    @register_to_config
    def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
        """Store sigma_max and initialize the sigma schedule via set_sigmas."""
        _lowerCAmelCase : Dict = sigma_max
        # setable values
        _lowerCAmelCase : str = None
        self.set_sigmas(__a, __a, __a, __a)

    def snake_case__ ( self, __a, __a = None):
        """Identity scaling: SDE-VE does not rescale model inputs."""
        return sample

    def snake_case__ ( self, __a, __a = None, __a = None):
        """Create the continuous timestep grid from 1 down to sampling_eps."""
        _lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)

    def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
        """Build the geometric sigma schedule between sigma_min and sigma_max."""
        _lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__a, __a)
        # Geometric interpolation between sigma_min and sigma_max over timesteps.
        _lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
        _lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def snake_case__ ( self, __a, __a):
        """Return sigma at the previous discrete index (zero at index 0)."""
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
        """Predictor step: one reverse-SDE ancestral-sampling update of the sample."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        _lowerCAmelCase : Dict = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
        _lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
        _lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
        _lowerCAmelCase : List[str] = torch.zeros_like(__a)
        _lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCAmelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            _lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
        _lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCAmelCase : Optional[Any] = randn_tensor(
            sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
        _lowerCAmelCase : int = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)

    def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
        """Corrector step: Langevin MCMC update with SNR-scaled step size."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        _lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCAmelCase : List[Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            _lowerCAmelCase : int = step_size.unsqueeze(-1)
        _lowerCAmelCase : List[Any] = sample + step_size * model_output
        _lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__a)

    def snake_case__ ( self, __a, __a, __a, ):
        """Forward diffusion: add sigma(t)-scaled noise to clean samples."""
        # NOTE(review): indexing assumes 4-D (batch, C, H, W) samples, per the
        # `[:, None, None, None]` broadcast below.
        _lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
        _lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
        _lowerCAmelCase : Any = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__a) * sigmas[:, None, None, None]
        )
        _lowerCAmelCase : int = noise + original_samples
        return noisy_samples

    def __len__( self):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 300
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase ):
    """Load a fairseq/metaseq OPT checkpoint and rewrite its state dict for HF ``OPTModel``.

    Drops fairseq-only keys, renames projection/layer-norm keys, and splits the
    fused ``qkv_proj`` weights into separate ``q_proj``/``k_proj``/``v_proj``
    tensors.

    BUG FIXES vs. the previous version:
    - renamed tensors were popped from the dict but never stored back under the
      new key (they were silently dropped);
    - the Q/K/V splits were bound to throwaway locals instead of being written
      into the state dict;
    - the checkpoint was re-read from disk a second time just to unwrap the
      nested "model" key.

    Parameters
    ----------
    _lowerCamelCase : str
        Path to a checkpoint file readable by ``torch.load``.

    Returns
    -------
    dict
        The transformed state dict.
    """
    sd = torch.load(_lowerCamelCase , map_location="cpu" )
    if "model" in sd.keys():
        # Some checkpoints nest the weights under a "model" key; unwrap in
        # memory instead of re-reading the (potentially huge) file.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj." , ".q_proj." )
            k_name = key.replace(".qkv_proj." , ".k_proj." )
            v_name = key.replace(".qkv_proj." , ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            q, k, v = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def A ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a fairseq OPT checkpoint into a saved HF ``OPTModel``.

    BUG FIXES vs. the previous version: all three parameters were named
    ``_lowerCamelCase`` (a SyntaxError — duplicate argument names) and the body
    read/overwrote names that were never bound. The call site at the bottom of
    this file passes ``config=`` by keyword, which fixes the third name.

    Parameters
    ----------
    checkpoint_path : str
        Path to the fairseq checkpoint file.
    pytorch_dump_folder_path : str
        Output directory for ``save_pretrained`` (created if missing).
    config : str | None
        Optional HF config name/path; a default ``OPTConfig`` is used when None.
    """
    # NOTE(review): `load_checkpoint` refers to the loader defined just above
    # (mangled to `A` in this file) — confirm the name before running.
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    # exist_ok=True: re-running the conversion into the same folder is fine.
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
# CLI entry point for the OPT checkpoint conversion.
# NOTE(review): mangled — the parser and parsed args are both bound to
# `_snake_case`, yet the lines below read `parser` / `args`, and
# `convert_opt_checkpoint` is not defined in this file (the converter above is
# named `A`). As written this raises NameError; restore the original bindings
# before running.
if __name__ == "__main__":
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    _snake_case = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 355
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A(_lowerCamelCase=8):
    """Generate a cryptographically random password of the given length.

    Characters are drawn uniformly from ASCII letters, digits and punctuation
    using the ``secrets`` CSPRNG.
    """
    pool = ascii_letters + digits + punctuation
    return "".join(secrets.choice(pool) for _ in range(_lowerCamelCase))
def A(chars_incl, i):
    """Build a password of total length ``i`` guaranteed to contain ``chars_incl``.

    The remaining ``i - len(chars_incl)`` characters are drawn in roughly equal
    thirds from letters, digits and punctuation, then the result is shuffled.

    Fix vs. the obfuscated original: both parameters shared one name, which is
    a SyntaxError in Python; the required-characters string and the target
    length are restored as distinct parameters.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3

    def _draw(pool, count):
        # Local replacement for the module-level `random` helper so this
        # function is self-contained; same behavior (uniform CSPRNG picks).
        return "".join(secrets.choice(pool) for _ in range(count))

    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    password = (
        chars_incl
        + _draw(ascii_letters, quotient + remainder)
        + _draw(digits, quotient)
        + _draw(punctuation, quotient)
    )
    chars_list = list(password)
    shuffle(chars_list)
    return "".join(chars_list)
# random is a generalised function for letters, characters and numbers
def A(chars_incl, count):
    """Return ``count`` characters drawn uniformly (CSPRNG) from ``chars_incl``.

    Fix vs. the obfuscated original: both parameters shared one name, which is
    a SyntaxError in Python.
    """
    return "".join(secrets.choice(chars_incl) for _ in range(count))
def A(chars_incl, i):
    """Placeholder (random_letters). TODO: implement letter selection.

    Fix vs. the obfuscated original: both parameters shared one name
    (SyntaxError).
    """
    pass  # Put your code here...
def A(chars_incl, i):
    """Placeholder (random_number). TODO: implement digit selection.

    Fix vs. the obfuscated original: both parameters shared one name
    (SyntaxError).
    """
    pass  # Put your code here...
def A(chars_incl, i):
    """Placeholder (random_characters). TODO: implement punctuation selection.

    Fix vs. the obfuscated original: both parameters shared one name
    (SyntaxError).
    """
    pass  # Put your code here...
def A(password, min_length=8):
    """Return True when ``password`` is at least ``min_length`` characters and
    contains an uppercase letter, a lowercase letter, a digit and a special
    character.

    Fix vs. the obfuscated original: both parameters shared one name
    (SyntaxError); the body already read ``password`` and ``min_length``.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def A ( ):
    '''Interactive driver: read a maximum length and a set of required
    characters from stdin, then print two generated passwords.

    NOTE(review): the obfuscation renamed every function in this module to
    ``A``, so the calls to ``password_generator`` and
    ``alternative_password_generator`` below reference names that are no
    longer defined — confirm against the original module before running.
    NOTE(review): the locals are bound as ``_lowerCAmelCase`` but the calls
    pass ``_lowerCamelCase`` (undefined) — same obfuscation damage.
    '''
    # Maximum password length requested by the user.
    _lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: ").strip())
    # Characters that must appear in the generated password.
    _lowerCAmelCase : Tuple = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:" , password_generator(_lowerCamelCase ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
    print("[If you are thinking of using this passsword, You better save it.]")
if __name__ == "__main__":
    # The obfuscation renamed every function in this module to ``A`` (the last
    # definition — the interactive driver above — wins), so call it by that
    # name instead of the no-longer-defined ``main``.
    A()
| 300
| 0
|
import functools
def A(worda, wordb):
    """Return the Levenshtein (edit) distance between ``worda`` and ``wordb``.

    Uses memoized recursion over the two word indices; insert, delete and
    substitute all cost 1.

    Fixes vs. the obfuscated original: the two parameters (and the two inner
    indices) shared one name, which is a SyntaxError, and the letter
    comparison compared a word against itself so the diff was always 0.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa, indexb) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 356
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import plumbing for the convnext sub-package.
#
# Fixes vs. the obfuscated original: every structure list was assigned to the
# throw-away name `_snake_case`, so `_import_structure` (read by `_LazyModule`)
# was never defined, and the constructed `_LazyModule` was bound to
# `_snake_case` instead of being installed into `sys.modules[__name__]`.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 0
|
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = set(range(3 , _lowerCamelCase , 2 ) )
primes.add(2 )
for p in range(3 , _lowerCamelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowerCamelCase , _lowerCamelCase ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowerCamelCase , limit + 1 , _lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
    # The obfuscation renamed `solution` to `A`; call the defined name.
    print(f"{A() = }")
| 357
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# NOTE(review): the obfuscation collapsed both constant names onto
# `_snake_case`, so the REDUCED_PLANCK_CONSTANT / SPEED_OF_LIGHT names read by
# the function below are never defined at module level — restore the original
# names before relying on the module globals.
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
_snake_case = 3e8 # unit of c : m * s^-1
def A(force, area, distance):
    """Solve the ideal-plate Casimir equation F = (hbar * c * pi^2 * A) / (240 * d^4)
    for whichever of force/area/distance is passed as 0.

    Exactly one argument must be 0; the solved quantity is returned in a
    one-entry dict keyed by its name. Raises ValueError on invalid input.

    Fixes vs. the obfuscated original: the three parameters shared one name
    (SyntaxError), and the physical constants it read were never defined at
    module level (collapsed onto `_snake_case`), so they are bound locally.
    """
    # Physical constants (module-level names were destroyed by obfuscation).
    REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # unit of ℏ : J * s
    SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1

    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
# Executes the module's doctests when run as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 300
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds tiny LayoutLM configs/inputs and checks model outputs.

    Fixes vs. the obfuscated original: every method was named ``snake_case__``
    (later definitions shadowed earlier ones), every signature declared the
    same parameter name repeatedly (a SyntaxError), attribute/item assignment
    targets were replaced by throw-away locals, and the final tuple unpack was
    collapsed. Names are restored to match the callers below
    (``self.model_tester.prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        # Store every knob on the instance; the prepare/check helpers read them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create a config plus random model inputs (with a legal bbox)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model three ways and check the output shapes."""
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head produces per-token vocab logits."""
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head shape."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head shape."""
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head start/end logit shapes."""
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() output to the common-test dict form."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


# Backward-compatible alias for the obfuscated class name.
UpperCAmelCase_ = TFLayoutLMModelTester
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common/pipeline test suite for the TF LayoutLM model family.

    Fixes vs. the obfuscated original: the base classes were the undefined
    placeholder ``a`` (the mixins are imported at the top of the file), all
    class attributes shared the name ``lowerCamelCase__``, every test method
    was named ``snake_case__`` (so only the last one survived and ``setUp``
    never ran), and ``config_class=__a`` referenced an undefined name.
    """

    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def A():
    """Return a hand-crafted two-example LayoutLM batch:
    (input_ids, attention_mask, bbox, token_type_ids, labels).

    Fix vs. the obfuscated original: each tensor was bound to a throw-away
    local while the return statement read the real names, which were never
    defined.
    """
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


# Restore the name the integration tests below call.
prepare_layoutlm_batch_inputs = A
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released microsoft/layoutlm-base-uncased weights.

    Fixes vs. the obfuscated original: all four tests shared the method name
    ``snake_case__`` (only the last survived), the prepared batch was bound to
    a single throw-away local instead of being unpacked, and the model calls
    passed the undefined name ``__a``.
    """

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1E-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 358
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_:
    """Arguments pertaining to which model/config we pre-train (HfArgumentParser dataclass).

    Fixes vs. the obfuscated original: every field was named
    ``lowerCamelCase__`` with no type annotation, so none of them were real
    dataclass fields; the names below are the ones the rest of the script
    reads (``model_args.verbose_logging``, ``model_args.cache_dir``,
    ``model_args.max_gumbel_temperature`` …). The ``default=a`` placeholders
    are restored to the upstream defaults — confirm against the original
    script.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.9_9_9_9_9_5, metadata={"help": "Decay of gumbel temperature during training."}
    )
def A(model_args, training_args):
    """Configure the module logger level from the parsed CLI arguments.

    DEBUG when verbose logging is requested, INFO on the main process,
    WARNING otherwise.

    Fix vs. the obfuscated original: both parameters shared one name
    (SyntaxError), and the chosen level was rebound to a fresh throw-away
    local on each branch.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class UpperCAmelCase_:
    """Arguments pertaining to the training data (HfArgumentParser dataclass).

    Fixes vs. the obfuscated original: every field was named
    ``lowerCamelCase__`` with no annotation; names are restored to what
    ``main`` reads (``data_args.dataset_name``, ``data_args.train_split_name``
    …). ``default=a`` placeholders are restored to the upstream defaults —
    confirm against the original script.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=2_0.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class UpperCAmelCase_:
    """Data collator that pads raw speech inputs and samples the time-mask
    indices needed for wav2vec2 pretraining.

    Fixes vs. the obfuscated original: the fields were all named
    ``lowerCamelCase__`` (with ``42`` standing in for the destroyed type
    annotations), so ``self.model`` / ``self.feature_extractor`` /
    ``self.padding`` etc. read by ``__call__`` never existed; the indexed
    assignment that marks the last valid frame of each example was collapsed
    to a plain local assignment; and the sampled mask was never stored in the
    batch. Restored per the upstream run_pretrain.py — confirm against it.
    """

    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        """Pad ``features`` into a tensor batch and attach ``mask_time_indices``."""
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class UpperCAmelCase_(a):
    """Trainer subclass that decays the gumbel-softmax temperature after every
    training step.

    Fixes vs. the obfuscated original: both method signatures declared ``__a``
    several times (a SyntaxError), the gumbel settings were bound to throw-away
    locals instead of ``self.*`` attributes (the body reads
    ``self.num_update_step`` etc.), and the training-step override must be
    named ``training_step`` for the base ``Trainer`` to call it.
    NOTE(review): ``self.use_amp`` / ``self.scaler`` / ``self.use_apex`` are
    internals of older ``transformers.Trainer`` versions — confirm the pinned
    version before running.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """Run one optimization step and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def A():
    """Entry point: pre-train a wav2vec2 model from CLI arguments.

    Fixes vs. the obfuscated original: every intermediate result was bound to
    a throw-away local (``_lowerCAmelCase``) while later statements read
    ``datasets``/``batch``/``data`` etc., which were never defined; the
    dataset splits were loaded but discarded; and the ``do_normalize`` /
    ``batched`` flags were passed an undefined name (restored to ``True``).
    NOTE(review): ``configure_logger``, ``ModelArguments``,
    ``DataTrainingArguments``, ``DataCollatorForWavaVecaPretraining`` and
    ``WavaVecaPreTrainer`` are the upstream names — the obfuscation renamed
    those definitions in this file, so confirm against the original script.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    A()
| 300
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
    # NOTE(review): the obfuscation collapsed both field names onto
    # `lowerCamelCase__` (and replaced their type annotations with `42`), so
    # this scheduler-output dataclass currently exposes a single meaningless
    # attribute. Upstream this output presumably carries two tensors
    # (prev_sample and prev_sample_mean) — restore the names and annotations
    # before use; confirm against the original scheduling_sde_ve module.
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
class UpperCAmelCase_(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (score_sde_pytorch style):
    `step_pred` is the reverse-SDE predictor, `step_correct` the Langevin
    corrector.

    NOTE(review): restored from a mangled source that declared every
    parameter as `__a` (duplicate-argument SyntaxError) and bound attribute
    assignments to throwaway locals.  Parameter/attribute names are grounded
    by their surviving read sites (`self.config.sampling_eps`,
    `self.set_sigmas(...)`, `self.discrete_sigmas`, ...); the base classes
    come from this file's imports (`register_to_config` requires ConfigMixin).
    """

    # First-order scheduler (name restored by convention — TODO confirm).
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=2000,
        snr=0.15,
        sigma_min=0.01,
        sigma_max=1348.0,
        sampling_eps=1e-5,
        correct_steps=1,
    ):
        """Store config and precompute the sigma/timestep schedules.

        Defaults are the original literals; `correct_steps` has no surviving
        read in this chunk — name taken from the upstream scheduler.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (filled in by set_timesteps / set_sigmas)
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        """Identity: the VE-SDE scheduler needs no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        """Set continuous timesteps, linearly spaced from 1 down to sampling_eps."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        """Set the geometric noise schedule used by predictor and corrector."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        # NOTE(review): the first `self.sigmas` assignment is immediately
        # overwritten below — kept to mirror the original's two assignments.
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        """One reverse-SDE ancestral (predictor) update of `sample`.

        Returns an `SdeVeOutput` (or `(prev_sample, prev_sample_mean)` tuple
        when `return_dict=False`).
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        """One Langevin MCMC (corrector) update with an SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device as the samples.
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 359
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
    """Scrape amazon.in search results for a product into a pandas DataFrame.

    NOTE(review): every assignment target in this function was mangled to
    `_lowerCAmelCase`, so the names read later (`product`, `soup`,
    `data_frame`, `product_title`, ...) are never bound and this function
    raises NameError as written.  The per-line notes record what each
    binding presumably was — confirm against the original script.
    Also, `ha`/`bsa` look like digit-mangled `h2`/`bs4`.
    """
    # presumably `url = f"...{product}"`, with `product` being the parameter
    # (mangled to `_lowerCamelCase`)
    _lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
    # presumably `header = {...}`: browser-like headers so Amazon serves the page
    _lowerCAmelCase : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    # presumably `soup = BeautifulSoup(requests.get(url, headers=header).text)`
    # (both arguments below wrongly reference the parameter)
    _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
    # Initialize a Pandas dataframe with the column titles
    _lowerCAmelCase : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            # presumably `product_title = item.h2.text` / `product_link = ... item.h2.a["href"]`
            _lowerCAmelCase : Any = item.ha.text
            _lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
            # presumably `product_price = ...` (current price, with currency sign)
            _lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                # presumably `product_rating = ...`
                _lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # rating element missing for this listing
                _lowerCAmelCase : str = "Not available"
            try:
                # presumably `product_mrp = "₹" + ...` (list price)
                _lowerCAmelCase : Optional[Any] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                _lowerCAmelCase : Optional[Any] = ""
            try:
                # presumably `discount = ...`: percentage saved vs. MRP
                _lowerCAmelCase : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                # MRP missing/unparseable -> discount unknown
                _lowerCAmelCase : Optional[Any] = float("nan" )
        except AttributeError:
            # entries without the expected markup (e.g. sponsored rows) are skipped
            pass
        # presumably stores the scraped row into `data_frame` and blanks two
        # of the per-item fields before advancing the index — the original
        # assignment targets cannot be recovered from this chunk; TODO confirm.
        _lowerCAmelCase : Any = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _lowerCAmelCase : List[str] = " "
        _lowerCAmelCase : Tuple = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): broken as written — the assignment binds `_snake_case`
    # but the call reads `product`, and `get_amazon_product_data` is not
    # defined in this file (the scraper above is named `A`).  Presumably
    # both names were mangled; confirm against the original script.
    _snake_case = "headphones"
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 300
| 0
|
from __future__ import annotations
# Sample input for the three next-greatest-element implementations below and
# the result they are expected to produce for it.  `arr` is restored from
# its reads in the __main__ block ("from __main__ import arr, ...");
# `expect` has no surviving read here — name taken from the upstream
# algorithm collection, TODO confirm.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right
    (-1 when there is none).  O(n^2) reference implementation.

    Function and parameter names restored from their read sites (the
    mangled source bound every assignment to `_lowerCAmelCase`, leaving
    `result` undefined at its `append` call, and the __main__ block calls
    this function by this name).
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        # Greater-element sentinel when no later element beats arr[i].
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like `next_greatest_element_slow`, but iterating with enumerate and
    slices.  Still O(n^2) worst case.

    Names restored from their read sites (`outer < inner`, `result.append`,
    and the __main__ block's call) — the mangled source never bound them.
    """
    result = []
    for i, outer in enumerate(arr):
        # Sentinel when nothing to the right of `outer` is greater.
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack version of the next-greater-element problem.

    Names restored from their read sites (`stack[-1]`, `stack.pop`,
    `result` in the return, the __main__ block's call); the mangled source
    bound `stack`/`result` assignments to `_lowerCAmelCase`, so neither
    existed and the `result[index]` store was lost.
    """
    arr_size = len(arr)
    stack: list[float] = []  # strictly decreasing stack of candidates
    result: list[float] = [-1] * arr_size
    # Walk right-to-left so the stack holds elements to the right of index.
    for index in reversed(range(arr_size)):
        if stack:
            # Discard candidates that are not greater than arr[index].
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    # The three implementations should print identical results for `arr`.
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # timeit statement setup: make the sample array and all three
    # implementations importable from __main__.  Name restored from the
    # `setup=setup` reads below (the mangled source bound `_snake_case`).
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 360
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint.

    Name restored from the call in `main()` below; the mangled source
    declared three identically-named parameters (a SyntaxError), and the
    surviving reads (`model.state_dict()`, `model_name.replace`, ...) ground
    the restored names.

    Args:
        model: model whose `state_dict()` is exported (a BertModel).
        ckpt_dir: directory for the TF checkpoint (created if absent).
        model_name: checkpoint base name; "-" is replaced with "_".
    """
    # PyTorch stores these weights as (out, in); TF expects the transpose.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring, tf substring) rewrite rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Map a pytorch state_dict key onto the bert/... TF variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Allocate a zero-initialized TF variable matching the numpy tensor.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a pytorch BertModel and export it as a TF checkpoint.

    Name restored from the `main()` call in the guard below; argparse flag
    types/required values restored per the upstream converter script —
    TODO confirm against the original.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 300
| 0
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Result record; name restored from the read in covid_stats' return line.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> "covid_data":
    """Scrape worldometers and return (cases, deaths, recovered) totals.

    Name restored from the call in the print below.  The mangled source
    bound the XPath string to a throwaway local and then passed the URL
    parameter to `.xpath()` — the XPath expression is what must be queried.
    """
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


if __name__ == "__main__":  # guard added so importing this module does no network I/O
    fmt = (
        "Total COVID-19 cases in the world: {}\n"
        "Total deaths due to COVID-19 in the world: {}\n"
        "Total COVID-19 patients recovered in the world: {}"
    )
    print(fmt.format(*covid_stats()))
| 361
|
class Graph:
    """Undirected weighted graph stored as a symmetric adjacency mapping.

    NOTE(review): restored from a mangled source — the class name comes from
    the surviving self-reference in `build()` (`g = Graph()`), and every
    method/attribute name from its read sites (`self.get_edges()`,
    `g.add_vertex`, `self.adjacency[head][tail]`, ...).  As mangled, the
    methods all shared one name and `add_edge` never wrote the adjacency.
    """

    def __init__(self):
        self.num_vertices = 0  # incremented by add_vertex below
        self.num_edges = 0  # presumably an edge counter; no surviving read — TODO confirm
        self.adjacency = {}  # adjacency[head][tail] = weight (kept symmetric)

    def add_vertex(self, vertex):
        """Register *vertex* if it has not been seen before."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge head<->tail; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so every edge weight is distinct
        (method name taken from the upstream source — TODO confirm)."""
        edges = self.get_edges()
        # get_edges() yields both orientations; drop one of each pair
        # (deliberate remove-while-iterating dedup, as in the original).
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return every edge in both orientations as (tail, head, weight)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from vertex and (head, tail, weight) edge lists."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UpperCAmelCase_:
    """Disjoint-set (union-find) with union by rank and path compression,
    plus a Borůvka MST helper.

    NOTE(review): in the original layout this appears to have been nested
    inside the graph class (the MST code refers to it as `Graph.UnionFind`)
    — confirm before relying on `boruvka()`.  Restored from a mangled
    source where `root1`/`root2` and `set1`/`set2` were collapsed into one
    name (making `union()` a no-op and the cheap-edge test constant-False)
    and every method shared the name `snake_case__`; the restored method
    names are grounded by the surviving calls (`self.find`, `self.make_set`,
    `union_find.union`).
    """

    def __init__(self):
        self.parent = {}  # item -> parent; roots point to themselves
        self.rank = {}  # item -> rank upper bound, for union by rank

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Ensure *item* is tracked; return its set representative."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of *item*, compressing the path."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets of item1 and item2; return the surviving root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @staticmethod
    def boruvka(graph):
        """Compute a minimum spanning tree of *graph* (Borůvka-style).

        Method name taken from the algorithm — TODO confirm the original.
        """
        num_components = graph.num_vertices
        # Originally `Graph.UnionFind()` — i.e. this class.
        union_find = UpperCAmelCase_()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # get_edges() yields both orientations; drop one of each pair.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
            num_components = num_components - 1
        # NOTE(review): `Graph` must resolve to the sibling graph class for
        # this rebuild to work — TODO confirm the intended reference.
        mst = Graph.build(edges=mst_edges)
        return mst
| 300
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repository root (three levels up from this test file); name restored from
# the `git_repo_path` read on the next line.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# Name restored from the `REFERENCE_CODE` reads in the test class below.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase_(unittest.TestCase):
    """Tests for the repo's `utils/check_copies.py` helper.

    NOTE(review): restored from a mangled source where every method shared
    one name (so only the last survived) and helper parameters were all
    `__a` (a duplicate-argument SyntaxError).  `check_copy_consistency` is
    grounded by its `self.check_copy_consistency(...)` call sites; the
    lifecycle methods follow unittest's setUp/tearDown contract; `PY35` is
    the digit-demangled `PYaa`.  TODO confirm restored names upstream.
    """

    def setUp(self):
        # Work against a throwaway copy of the bert modeling file.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write a candidate class file and check (or overwrite) copy consistency."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"])

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"])

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"])

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 362
|
# Universal gas constant R in J/(mol*K); name restored from the read in the
# return expression below (the mangled source bound `_snake_case`).
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature, molar_mass):
    """Return the root-mean-square speed v_rms = sqrt(3*R*T / M).

    Function name restored from the call in the __main__ example below;
    parameter names from their reads in the body (the mangled source
    declared two identically-named parameters — a SyntaxError).

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass (must be > 0; SI units give m/s).

    Raises:
        Exception: on a negative temperature or non-positive molar mass
            (original exception type preserved for callers that catch it).
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: names restored from the reads in the original call and
    # f-string (the mangled source bound everything to `_snake_case`).
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 300
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for the local linear-algebra lib (Vector/Matrix helpers).

    NOTE(review): restored from a mangled source where every test shared the
    name `snake_case__` (each definition shadowing the previous) and every
    assignment was bound to `_lowerCAmelCase`, leaving the names read in the
    assertions (`x`, `y`, `a`, `minors`, ...) undefined.  Test-method names
    follow the standard `test_*` convention — originals unknown, TODO confirm.
    """

    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
    # Run the vector/matrix test suite when this file is executed directly.
    unittest.main()
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the original code bound both this and the archive map
# to the same name, clobbering the logger.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a Wav2Vec2-style speech model.

    The original signature repeated the placeholder name `__a` for every
    parameter (a SyntaxError) and all `self.<attr>` assignment targets were
    lost; parameter names are restored from the right-hand sides and from the
    attributes read elsewhere in the class (`self.conv_dim`,
    `self.num_feat_extract_layers`, ...). Base class restored to the
    `PretrainedConfig` imported at the top of the file.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # One feature-extractor layer per conv block.
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv configuration lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the feature encoder (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 300
| 0
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def A ( _lowerCamelCase ):
'''simple docstring'''
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
_lowerCAmelCase : List[str] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase : Union[str, Any] = image[0].size
_lowerCAmelCase : str = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_lowerCAmelCase : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_lowerCAmelCase : Optional[Any] = np.concatenate(_lowerCamelCase , axis=0 )
_lowerCAmelCase : str = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
_lowerCAmelCase : Any = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase : Any = 2.0 * image - 1.0
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase : Optional[int] = torch.cat(_lowerCamelCase , dim=0 )
return image
def A ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
_lowerCAmelCase : List[Any] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_lowerCAmelCase : Dict = mask[0].size
_lowerCAmelCase : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_lowerCAmelCase : int = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
_lowerCAmelCase : List[Any] = np.concatenate(_lowerCamelCase , axis=0 )
_lowerCAmelCase : str = mask.astype(np.floataa ) / 255.0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Any = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
_lowerCAmelCase : Optional[int] = torch.cat(_lowerCamelCase , dim=0 )
return mask
class UpperCAmelCase_(DiffusionPipeline):
    """Inpainting pipeline using the RePaint resampling strategy.

    Base class restored to the `DiffusionPipeline` imported above (it was the
    undefined name `a`); the two `lowerCamelCase__ = 42` placeholders are the
    mangled module annotations; duplicate `__a` parameter names (a
    SyntaxError) restored from how each value is used in the body.
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run RePaint inpainting.

        image: original image to inpaint; mask_image: 1 where to inpaint.
        Returns an ImagePipelineOutput (or a 1-tuple if return_dict=False).
        """
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        # t_last starts one above the first timestep so the first iteration denoises.
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t (RePaint "jump back")
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 364
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab; `in_colab` is read by
# BulletMenu.run below (the original bound the result to `_snake_case`,
# leaving `in_colab` undefined).
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class UpperCAmelCase_:
    """Interactive terminal bullet menu: arrow/number keys move, enter selects.

    Restored from the mangled original: duplicate `__a` parameter names
    (a SyntaxError), a shared mutable default argument, and method names —
    internal call sites (`self.write_choice`, `self.print_choice`,
    `self.move_direction`) pin the real names.
    """

    def __init__(self, prompt=None, choices=None):
        self.position = 0
        # Avoid the mutable-default-argument trap; behavior is unchanged.
        self.choices = choices if choices is not None else []
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end=""):
        """Write the choice text at `index` (green on non-Windows terminals)."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index):
        """Print one menu row, with the arrow marker if it is the current row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction, num_spaces=1):
        """Move the highlight up or down by `num_spaces`, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit key was pressed."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice=0):
        """Render the menu and block until the user picks a choice index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab terminals don't support raw key handling; fall back to stdin.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 300
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """Iterable dataset yielding 0, 1, 2, ... until a random stop or `max_length`.

    Renamed to `RandomIterableDataset` to match its call sites below; base
    class restored to the `IterableDataset` imported above (was the undefined
    name `a`); lost assignment targets (`self.p_stop`, `count`, `stop`)
    reconstructed from how the values are read.
    """

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop  # per-step probability of stopping early
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class UpperCAmelCase_(unittest.TestCase):
    """Tests for accelerate's sharding / skipping data-loader utilities.

    Reconstructed from the mangled original: lost assignment targets and the
    `__a` placeholders for `drop_last` / `split_batches` / `even_batches`
    (each boolean is recovered from the expected shard contents next to it:
    wrap-around batches imply drop_last=False, truncated ones drop_last=True).
    Duplicate method names replaced with distinct `test_*` names so unittest
    actually runs them.
    """

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        """Shard `batch_sampler` over 2 processes and compare each shard to `expected`."""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The shard's reported length must match the materialized list.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        """Shard `dataset` and check the shards reassemble into the reference stream."""
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
        self.assertTrue(len(first_list) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            # Shards loop back to the beginning to even out; extend the reference accordingly.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazily-populated import structure: each optional-dependency branch adds the
# submodules that become importable when that dependency is present. The
# original collapsed every insertion to `_snake_case = [...]`, leaving the
# `_import_structure` referenced at the bottom undefined.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy; the original dropped
    # the `sys.modules[__name__] =` target, so the proxy was never installed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-populated import structure (see transformers' lazy-module pattern).
# The original collapsed every dict insertion to `_snake_case = [...]`,
# leaving the `_import_structure` referenced at the bottom undefined.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy; the original dropped
    # the `sys.modules[__name__] =` target, so the proxy was never installed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366
|
from __future__ import annotations
from typing import Any
class Matrix:
    """
    A simple dense matrix backed by a list of lists, supporting indexing,
    arithmetic, transposition and the Sherman-Morrison rank-1 inverse update.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix with every cell set to `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Pretty, column-aligned rendering of the matrix."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest element, used to right-align every cell.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Render one row as "[a, b, c]" using the shared cell width.
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff `loc` is an in-range (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        """Return the element at m[row, column]."""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        """Assign the element at m[row, column]."""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        """Element-wise sum; shapes must match."""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        """Element-wise negation."""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        """Scalar multiplication for numbers, standard product for matrices."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """
        Given A^-1 (self) and column vectors u and v, return (A + u v^T)^-1
        via the Sherman-Morrison formula, or None when the update is singular
        (1 + v^T A^-1 u == 0).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo: rank-1 update of the 3x3 identity via Sherman-Morrison."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 300
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)
# fairseq parameter-name prefixes -> HF Wav2Vec2 module paths ("*" is a
# placeholder for the encoder layer index, filled in during conversion).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Heads that live at the top level of the HF model rather than inside a layer.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """
    Copy one fairseq tensor `value` into the HF module attribute addressed by
    the dotted path `key`, optionally into its `weight_type` sub-tensor
    ("weight", "weight_g", "weight_v" or "bias").  `full_name` is only used
    for assert/log messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """
    Walk the fairseq encoder state dict and copy every tensor into `hf_model`.
    Returns the optional encoder->decoder projection layer (or None when the
    checkpoint has none); logs tensors that matched no known slot.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Copy one fairseq `conv_layers.*` tensor into the HF feature extractor.
    Tensors matching no known slot are appended to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # Convolution weight / bias.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer norm (group norm only exists on the first conv layer).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """
    Build a bias-free linear layer that shares its weight tensor with the
    given embedding (used to tie an output projection to the embedding).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding weight (shape (vocab, emb)).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """
    Parse a fairseq dict file (one `word count` pair per line) into a HF-style
    vocab mapping, reserving ids 0-3 for the special tokens.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    # Regular words start after the 4 special tokens.
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Convert a fairseq wav2vec2 + s2t checkpoint into a HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    # presumably these select the tokenizer/feature-extractor classes on load
    # — TODO confirm the exact keys against the upstream conversion script.
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 367
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas dataset builder."""

    # Optional feature schema; when set, loaded tables are cast to it.
    features: Optional[datasets.Features] = None
class UpperCAmelCase_(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads pickled pandas DataFrames into Arrow tables."""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata: forwards the configured feature schema."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a table to the configured features, when any are set."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 300
| 0
|
from __future__ import annotations

# Type aliases for the rotor positions (three 1-based ints) and the three
# selected rotor wirings (three 26-letter strings).
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric substitution: mapping each letter to its pair and back.
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos, rotsel, pb):
    """
    Validate rotor selection (3 unique rotors), the three 1-based rotor
    positions, and the plugboard string; return (rotpos, rotsel, pbdict).
    """
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring):
    """
    Turn a plugboard string of letter pairs (e.g. "ABXY") into a symmetric
    substitution dict {A: B, B: A, X: Y, Y: X}.  Raises on non-string input,
    odd length, unknown symbols or duplicates.
    """
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    # Fix: the original discarded the result of `replace`, so spaces were
    # never stripped before validation.
    pbstring = pbstring.replace(" ", "")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(text, rotor_position, rotor_selection=(rotor1, rotor2, rotor3), plugb=""):
    """
    Emulate an Enigma machine: encrypt (or, with the same settings, decrypt)
    `text` using three rotor positions, three rotor wirings and an optional
    plugboard string.  Non-alphabet symbols pass through unchanged.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Positions are given 1-based; work 0-based internally.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # Non-alphabet symbols are appended unchanged.
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE(review): the obfuscated source collapsed all three rotor names;
    # (rotor2, rotor4, rotor8) matches the upstream demo — confirm.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 368
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the TF MobileBert family."""

    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the obfuscated source had two unnamed False flags here;
    # test_head_masking/test_onnx match the upstream test file — confirm.
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining models additionally need a next-sentence label.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=32, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Optional[Any] = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : List[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Union[str, Any] = embedding_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = None
if self.use_input_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : str = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertModel(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Any = model(__a)
_lowerCAmelCase : Optional[Any] = [input_ids, input_mask]
_lowerCAmelCase : List[Any] = model(__a)
_lowerCAmelCase : Any = model(__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForMaskedLM(config=__a)
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = TFMobileBertForNextSentencePrediction(config=__a)
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__a)
_lowerCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Optional[Any] = TFMobileBertForSequenceClassification(config=__a)
_lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_choices
_lowerCAmelCase : List[Any] = TFMobileBertForMultipleChoice(config=__a)
_lowerCAmelCase : Dict = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : List[str] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__a, 1), (1, self.num_choices, 1))
_lowerCAmelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCAmelCase : List[str] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_labels
_lowerCAmelCase : Union[str, Any] = TFMobileBertForTokenClassification(config=__a)
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = TFMobileBertForQuestionAnswering(config=__a)
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase : List[Any] = TFMobileBertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real logits of the pretrained MobileBERT checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 3_0522]
        self.assertEqual(output.shape, expected_shape)
        # Reference values from the original TF implementation.
        expected_slice = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 300
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer serializes to / loads from.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# Mapping from CTRL control-code name to its vocabulary id.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    `word` is a tuple of symbols (variable-length strings); the result is the
    set of all (previous_symbol, next_symbol) bigrams used by the BPE merge loop.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """
    CTRL BPE tokenizer (byte-pair encoding on whitespace-split tokens, Salesforce CTRL style).

    Args:
        vocab_file (`str`): Path to the vocabulary (JSON) file.
        merges_file (`str`): Path to the BPE merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header; last element is an empty trailing line.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace-delimited token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so end-of-word merges differ from interior merges.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (i.e. most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order so they can be re-loaded by line index.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
| 369
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_snake_case = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(RAG_CONFIG_DOC)  # NOTE(review): the doc constant above was mangled to `_snake_case`; confirm its name.
class RagConfig(PretrainedConfig):
    """Composite configuration for RAG: wraps a question-encoder config and a generator config."""

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS id when not set on the RAG config itself.
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate a RagConfig from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this config (and its two sub-configs) to a plain dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 300
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    """Task template describing the column layout of an audio-classification dataset."""

    # `task` is serialized even when it equals the default so round-trips are lossless.
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema matches `features`' ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen-dataclass guard to attach the instance-level schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 370
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract base class every CLI sub-command must implement."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's sub-parser and arguments on `parser`."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 300
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of y = loc + scale * x, with closed-form mean/variance/stddev helpers."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project a hidden vector to the raw arguments of a distribution, one Linear per argument."""

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        # Unconstrained projections, then mapped into each argument's valid domain.
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module so it composes with other layers."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how network outputs parameterize a torch distribution."""

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Each raw argument is repeated `dim` times for multivariate outputs.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event sampled from the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions (length of `event_shape`)."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (used to pad targets)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections into each argument's valid domain; subclasses implement."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth positivity transform: (x + sqrt(x^2 + 4)) / 2, like softplus but cheaper.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student's t output: positive scale, degrees of freedom > 2."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # Scale must be strictly positive; df > 2 guarantees a finite variance.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal output: positive scale."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output: positive total_count, unconstrained logits."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 371
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (timm_key, hf_key) renames for a ViT checkpoint.

    When `base_model` is True the classifier-free model is targeted and the
    leading "vit." prefix is stripped from every destination key.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv matrix/bias into separate HF query/key/value entries (in place)."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head entries from `state_dict` (in place); missing keys are ignored."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to key `new` in `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT/DeiT checkpoint's weights into the HF ViT structure,
    verify the outputs match, and save model + image processor to disk.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass  # deit-base uses the default ViT config
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass  # default config already matches ViT-base
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300
| 0
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two batches of embeddings (rows normalized to unit length)."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    """CLIP-based NSFW classifier that flags generated images against fixed concept embeddings."""

    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Fixed (non-trainable) concept embeddings and per-concept thresholds.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # Any special-care hit lowers the bar for all regular concepts below.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Vectorized (ONNX-friendly) variant of `forward` without Python-side loops."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 350
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_(a):
    """Deprecated alias for ``VideoMAEImageProcessor``, kept for backward compatibility.

    The base class ``a`` is this file's mangled alias — presumably
    ``VideoMAEImageProcessor``; confirm against the original module.
    """

    def __init__(self, *args, **kwargs):
        # Fixes the mangled signature (`*__a, **__a` is a duplicate-name SyntaxError)
        # and restores FutureWarning as the warning category: the second positional
        # argument of warnings.warn must be a Warning subclass, not the args tuple.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# The second assignment to ``_snake_case`` used to clobber the logger, leaving the
# ``logger.info``/``logger.warning`` calls in the config classes below undefined.
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
# Backward-compatible alias for the mangled module-level name (it ended up
# holding the archive map, the last value assigned to it).
_snake_case = INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP
class UpperCAmelCase_(a):
    """Configuration for the InstructBLIP vision encoder.

    The base class ``a`` is the file's mangled alias (presumably
    ``PretrainedConfig`` — confirm against the original module).
    """

    # Read as ``cls.model_type`` by the loader below; the mangled file stored
    # this value under an unrelated class attribute name.
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        # The mangled signature declared every parameter as ``__a`` (a duplicate-name
        # SyntaxError) and bound each value to a throwaway local, so no attribute was
        # ever set.  Names restored from the attribute reads and default values.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def snake_case__(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a checkpoint (canonical name: ``from_pretrained``).

        Extracts the nested ``vision_config`` when given a composite
        ``instructblip`` config dict.
        """
        cls._set_token_in_kwargs(kwargs)

        # get_config_dict returns (config_dict, remaining_kwargs); the mangled
        # version bound the whole tuple to one local and then read ``config_dict``.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class UpperCAmelCase_(a):
    """Configuration for the InstructBLIP Q-Former.

    The base class ``a`` is the file's mangled alias (presumably
    ``PretrainedConfig`` — confirm against the original module).
    """

    # Read as ``cls.model_type`` by the loader below.
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        # Restores the duplicate-``__a`` signature (SyntaxError in the mangled file)
        # and the attribute assignments that were bound to throwaway locals.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def snake_case__(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a checkpoint (canonical name: ``from_pretrained``).

        Extracts the nested ``qformer_config`` when given a composite
        ``instructblip`` config dict.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class UpperCAmelCase_(a):
    """Composite InstructBLIP configuration holding vision, Q-Former and text sub-configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        # NOTE(review): the vision/qformer config classes in this file were defined
        # under mangled names; these references assume the canonical class names
        # exist in the module — confirm against the original file.
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # the Q-Former cross-attends into the vision encoder's hidden states
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from the three sub-config objects.

        Canonical name restored: in the mangled file this classmethod and
        ``to_dict`` shared one name, so this one was shadowed and unreachable.
        """
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        """Serialize to a plain dict, expanding the three sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 351
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Nightly integration test for OnnxStableDiffusionInpaintPipelineLegacy.

    Requires CUDA, onnxruntime-gpu and network access; nothing here is unit-testable
    in isolation.  Property names are restored from the ``self.gpu_provider`` /
    ``self.gpu_options`` reads in the test body (the mangled file gave all three
    methods one colliding name).
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # canonical diffusers disables ORT's memory-pattern optimization here;
        # the mangled file assigned False to a throwaway local instead
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 300
| 0
|
import random
def A(a, left_index, right_index):
    """Lomuto-style partition of ``a[left_index:right_index]`` around ``a[left_index]``.

    Returns the final index of the pivot.  Fixes the mangled signature (three
    parameters with the same name is a SyntaxError) and the two swaps, whose
    tuples were bound to throwaway locals instead of back into the list.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # put the pivot between the smaller and larger partitions
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


# The rest of this module refers to this function as ``partition``.
partition = A
def _quick_sort_partition(a, left_index, right_index):
    # Local Lomuto partition helper so this block is self-contained:
    # partitions a[left_index:right_index] around a[left_index], returns pivot index.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def A(a, left, right):
    """Sort ``a[left:right]`` in place with randomized quicksort.

    Fixes the mangled signature (duplicate parameter names) and the broken
    pivot swap, and recurses through this function's actual name instead of
    the undefined ``quick_sort_random``.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        # switches the pivot with the left most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = _quick_sort_partition(a, left, right)
        A(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        A(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


# The rest of this module refers to this function as ``quick_sort_random``.
quick_sort_random = A
def A():
    """Read comma-separated integers from stdin, quicksort them in place, print them.

    Restores the comprehension variable (the mangled file converted an undefined
    name instead of each ``item``).  ``quick_sort_random`` is the module-level
    name of the randomized quicksort defined above.
    """
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


# canonical name alias so the entry-point guard keeps its original call
main = A


if __name__ == "__main__":
    main()
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Tests for CLIPSegProcessor (a tokenizer + image-processor wrapper).

    The mangled file gave all thirteen methods one colliding name (only the
    last would have survived on the class) and bound every ``self.*`` fixture
    path to a throwaway local.  Method names are restored from the
    ``self.get_tokenizer()`` / ``self.prepare_image_inputs()`` / fixture-path
    reads in the bodies and unittest's setUp/tearDown protocol.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 300
| 0
|
from math import isclose, sqrt
def A(point_x, point_y, incoming_gradient):
    """One reflection step of a laser inside the ellipse 4x^2 + y^2 = 100.

    Given the current impact point and the gradient of the incoming beam,
    reflect about the surface normal and intersect the outgoing line with the
    ellipse again.  Returns ``(next_x, next_y, outgoing_gradient)``.  Fixes the
    mangled signature (three identically named parameters) and restores all
    intermediate variable names, which were bound to throwaway locals.
    """
    # gradient of the normal at (point_x, point_y); reflection doubles the angle
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


# The rest of this module refers to this function as ``next_point``.
next_point = A
def _next_point(point_x, point_y, incoming_gradient):
    # Self-contained copy of the reflection step: reflect the incoming beam at
    # (point_x, point_y) on the ellipse 4x^2 + y^2 = 100 and return the next
    # impact point plus the outgoing gradient.
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    discriminant = sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    x_minus = (-linear_term - discriminant) / (2 * quadratic_term)
    x_plus = (-linear_term + discriminant) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def A(first_x_coord=1.4, first_y_coord=-9.6):
    """Project Euler 144: count how many times the laser beam reflects inside
    the white cell 4x^2 + y^2 = 100 before escaping through the top gap
    (-0.01 <= x <= 0.01, y > 0).  The beam enters at (0.0, 10.1) and first
    hits the wall at ``(first_x_coord, first_y_coord)``.
    Fixes the mangled signature (duplicate parameter names) and the loop body,
    which discarded the new point instead of advancing it.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = _next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


# canonical name alias so the entry-point guard keeps its original call
solution = A


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 353
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_(unittest.TestCase):
    """Pipeline tests for zero-shot classification.

    Attribute and method names are restored from the reads inside the class
    body (``model_mapping``/``tf_model_mapping`` in the filters,
    ``self.run_entailment_id`` in the runner); the mangled file bound them
    to unrelated names, and all test methods shared one colliding name.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        # name assumed from the generic pipeline-test harness convention — confirm
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier):
        """Exercise entailment-label resolution for several label2id layouts,
        then restore the model's original mapping."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            }, )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, )
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, )
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            }, )
| 300
| 0
|
"""simple docstring"""
import os
import sys
import unittest
# Resolve the repository root (three directory levels above this file) and put
# the repo's `utils` directory on sys.path so `get_test_info` can be imported.
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
git_repo_path = _snake_case  # fix: `git_repo_path` was read below but never bound
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)
# NOTE(review): both test-file paths are bound to the same name, so the BERT
# path is immediately shadowed by the BLIP one — presumably these were two
# distinct constants before identifier mangling. Kept as-is to avoid changing
# the module interface.
_snake_case = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_snake_case = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase_ ( unittest.TestCase):
    # Tests for the `get_test_info` mapping utilities (test class -> tester
    # class, model -> test class, model -> tester class) over the BERT and
    # BLIP test files.
    # NOTE(review): identifier mangling broke these tests as written — every
    # mapping call passes `__a`, which is unbound at module scope (presumably
    # the BERT/BLIP test-file path constants), and all three methods share the
    # name `snake_case__`, so only the last one survives on the class.
    # TODO restore names from upstream.
    def snake_case__ ( self):
        '''Each test class should map to exactly its tester class.'''
        _lowerCAmelCase : Optional[int] = get_test_to_tester_mapping(__a)  # presumably the BERT test file
        _lowerCAmelCase : Tuple = get_test_to_tester_mapping(__a)  # presumably the BLIP test file
        _lowerCAmelCase : Union[str, Any] = {"BertModelTest": "BertModelTester"}
        _lowerCAmelCase : Union[str, Any] = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(__a), __a)
        self.assertEqual(get_test_info.to_json(__a), __a)
    def snake_case__ ( self):
        '''Each model class should map to the test classes that exercise it.'''
        _lowerCAmelCase : str = get_model_to_test_mapping(__a)
        _lowerCAmelCase : int = get_model_to_test_mapping(__a)
        _lowerCAmelCase : str = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        _lowerCAmelCase : str = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(__a), __a)
        self.assertEqual(get_test_info.to_json(__a), __a)
    def snake_case__ ( self):
        '''Each model class should map to the tester classes that configure it.'''
        _lowerCAmelCase : str = get_model_to_tester_mapping(__a)
        _lowerCAmelCase : Tuple = get_model_to_tester_mapping(__a)
        _lowerCAmelCase : Tuple = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        _lowerCAmelCase : Any = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(__a), __a)
        self.assertEqual(get_test_info.to_json(__a), __a)
| 354
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( a):
    # Output container for the SDE-VE scheduler predictor step; the scheduler
    # below instantiates it as `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`.
    # NOTE(review): both fields are bound to the same mangled name, so the
    # first placeholder is shadowed — presumably these were the annotated
    # fields `prev_sample` and `prev_sample_mean` before obfuscation.
    # TODO confirm against upstream diffusers.
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
class UpperCAmelCase_ ( a , a):
    # Variance-exploding stochastic scheduler (predictor `step_pred` +
    # corrector `step_correct`), structurally matching diffusers'
    # `ScoreSdeVeScheduler`.
    # NOTE(review): identifier mangling left this class broken as written:
    # every parameter in each signature is named `__a` (duplicate argument
    # names are a SyntaxError in Python) while the bodies read the original
    # names (`sigma_max`, `sample`, `sigma_min`, ...), and all public methods
    # share the name `snake_case__`. Comments below describe the intended
    # behavior; TODO restore real identifiers from upstream.
    lowerCamelCase__ = 1  # presumably the scheduler `order`
    @register_to_config
    def __init__( self, __a = 2000, __a = 0.15, __a = 0.01, __a = 1_348.0, __a = 1E-5, __a = 1, ):
        '''Configure the scheduler; defaults correspond to (num_train_timesteps, snr, sigma_min, sigma_max, sampling_eps, correct_steps).'''
        # initial noise standard deviation (reads the unmangled `sigma_max`)
        _lowerCAmelCase : Dict = sigma_max
        # setable values
        _lowerCAmelCase : str = None
        self.set_sigmas(__a, __a, __a, __a)
    def snake_case__ ( self, __a, __a = None):
        '''Input-scaling hook; SDE-VE needs none, so the sample is returned unchanged.'''
        return sample
    def snake_case__ ( self, __a, __a = None, __a = None):
        '''Create the continuous timestep grid from 1 down to `sampling_eps`.'''
        _lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCAmelCase : Dict = torch.linspace(1, __a, __a, device=__a)
    def snake_case__ ( self, __a, __a = None, __a = None, __a = None):
        '''Build the geometric sigma schedule (and the timesteps, if not yet set).'''
        _lowerCAmelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCAmelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCAmelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__a, __a)
        # geometric interpolation between sigma_min and sigma_max over the timesteps
        _lowerCAmelCase : int = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(__a), math.log(__a), __a))
        _lowerCAmelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def snake_case__ ( self, __a, __a):
        '''Return sigma at the previous discrete timestep (zero where t == 0).'''
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def snake_case__ ( self, __a, __a, __a, __a = None, __a = True, ):
        '''Predictor step: one reverse-SDE ancestral-sampling update (eqs. 6/47 of the score-SDE paper).'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # broadcast the scalar timestep over the batch
        _lowerCAmelCase : Dict = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCAmelCase : Dict = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCAmelCase : Union[str, Any] = timesteps.to(self.discrete_sigmas.device)
        _lowerCAmelCase : Any = self.discrete_sigmas[timesteps].to(sample.device)
        _lowerCAmelCase : List[Any] = self.get_adjacent_sigma(__a, __a).to(sample.device)
        _lowerCAmelCase : List[str] = torch.zeros_like(__a)
        _lowerCAmelCase : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCAmelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            _lowerCAmelCase : Optional[int] = diffusion.unsqueeze(-1)
        _lowerCAmelCase : Dict = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCAmelCase : Optional[Any] = randn_tensor(
            sample.shape, layout=sample.layout, generator=__a, device=sample.device, dtype=sample.dtype)
        _lowerCAmelCase : int = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCAmelCase : Tuple = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__a, prev_sample_mean=__a)
    def snake_case__ ( self, __a, __a, __a = None, __a = True, ):
        '''Corrector step: a Langevin-dynamics update with step size set by the signal-to-noise ratio.'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape, layout=sample.layout, generator=__a).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        _lowerCAmelCase : Any = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Dict = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        _lowerCAmelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCAmelCase : Dict = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCAmelCase : List[Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            _lowerCAmelCase : int = step_size.unsqueeze(-1)
        _lowerCAmelCase : List[Any] = sample + step_size * model_output
        _lowerCAmelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__a)
    def snake_case__ ( self, __a, __a, __a, ):
        '''Forward-diffuse `original_samples` by adding sigma-scaled noise at `timesteps`.'''
        _lowerCAmelCase : Optional[Any] = timesteps.to(original_samples.device)
        _lowerCAmelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
        # scale supplied noise by sigma, or draw fresh noise if none was given
        _lowerCAmelCase : Any = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__a) * sigmas[:, None, None, None]
        )
        _lowerCAmelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self):
        '''Number of training timesteps configured for this scheduler.'''
        return self.config.num_train_timesteps
| 300
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazily-exposed public API of the AltCLIP sub-package, keyed by submodule.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
_snake_case = _import_structure  # keep the original (mangled) alias bound

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply don't advertise the modeling objects
    pass
else:
    # fix: this list was previously bound to a throwaway name instead of being
    # registered in `_import_structure`, so the modeling symbols were never
    # exposed through the lazy module (and `_import_structure` itself was
    # never defined, raising NameError below).
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy over `_import_structure`.
    _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A ( _lowerCamelCase = 8 ):
    """Return a random password of ``_lowerCamelCase`` characters.

    Characters are drawn with the cryptographic ``secrets`` RNG, uniformly
    from ASCII letters, digits and punctuation.
    """
    # fix: previously the code called secrets.choice(<length int>) instead of
    # choosing from the character pool, which raises TypeError on every call.
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(_lowerCamelCase))
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''Build a password of a requested total length that must contain the given characters.

    NOTE(review): identifier mangling broke this function as written — both
    parameters share the name ``_lowerCamelCase`` (duplicate argument names
    are a SyntaxError), the body reads ``i``, ``chars_incl``, ``quotient``
    and ``remainder`` which are never bound under those names, and the helper
    it calls as ``random`` is defined in this module under a different name.
    Presumably the original signature was ``(chars_incl, i)``; TODO restore
    from upstream.
    '''
    i -= len(_lowerCamelCase )
    _lowerCAmelCase : Union[str, Any] = i // 3  # split remaining length evenly over the three pools
    _lowerCAmelCase : List[Any] = i % 3  # leftover characters go to the letters pool
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    _lowerCAmelCase : str = (
        chars_incl
        + random(_lowerCamelCase , quotient + remainder )
        + random(_lowerCamelCase , _lowerCamelCase )
        + random(_lowerCamelCase , _lowerCamelCase )
    )
    # shuffle so the required characters are not grouped at the front
    _lowerCAmelCase : str = list(_lowerCamelCase )
    shuffle(_lowerCamelCase )
    return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A ( _lowerCamelCase , _lowerCamelCase_count ):
    """Return ``_lowerCamelCase_count`` characters drawn uniformly from the pool ``_lowerCamelCase``.

    Generalised generator for letters, digits or punctuation: the first
    positional argument is the character pool, the second the count.
    """
    # fix: the two parameters previously shared one name, which is a
    # SyntaxError in Python; positional call sites are unaffected.
    return "".join(secrets.choice(_lowerCamelCase) for _ in range(_lowerCamelCase_count))
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''Unimplemented stub for a pool-specific generator (pool, count).

    NOTE(review): both parameters share one mangled name, which is a
    SyntaxError as written; presumably a (chars, count) pair originally.
    '''
    pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''Unimplemented stub for a pool-specific generator (pool, count).

    NOTE(review): both parameters share one mangled name, which is a
    SyntaxError as written; presumably a (chars, count) pair originally.
    '''
    pass # Put your code here...
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''Unimplemented stub for a pool-specific generator (pool, count).

    NOTE(review): both parameters share one mangled name, which is a
    SyntaxError as written; presumably a (chars, count) pair originally.
    '''
    pass # Put your code here...
def A ( password , min_length = 8 ):
    """Return True if ``password`` is considered strong.

    A strong password is at least ``min_length`` characters long and contains
    at least one uppercase letter, one lowercase letter, one digit and one
    punctuation character.
    """
    # fix: the parameters previously shared one mangled name (a SyntaxError)
    # while the body read `password`/`min_length`, and the four category
    # checks were all assigned to one throwaway local while the return read
    # `upper`/`lower`/`num`/`spec_char`; restore the names the body uses.
    if len(password) < min_length:
        # Your Password must be at least `min_length` characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def A ( ):
    '''Interactive driver: read a length and required characters, then print two passwords.

    NOTE(review): this calls ``password_generator`` and
    ``alternative_password_generator``, but the generators in this module
    were renamed to ``A`` by identifier mangling, so both names are unbound
    here; TODO restore from upstream.
    '''
    _lowerCAmelCase : Optional[Any] = int(input("Please indicate the max length of your password: " ).strip() )
    _lowerCAmelCase : Tuple = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(_lowerCamelCase ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
    print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 300
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_snake_case = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_snake_case = "main"
# Default branch name
_snake_case = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
_snake_case = "aaaaaaa"
# This commit does not exist, so we should 404.
_snake_case = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
_snake_case = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def A ( ):
    """Wrap a block with an English welcome/goodbye printed to stdout."""
    entry_msg, exit_msg = "Welcome!", "Bye!"
    print(entry_msg)
    yield
    print(exit_msg)
@contextlib.contextmanager
def A ( ):
    """Wrap a block with a French greeting/farewell printed to stdout."""
    entry_msg, exit_msg = "Bonjour!", "Au revoir!"
    print(entry_msg)
    yield
    print(exit_msg)
class UpperCAmelCase_ ( unittest.TestCase):
    def snake_case__ ( self):
        """Sanity-check that the transformers package is importable and has a
        module spec discoverable via importlib."""
        checks = (transformers.__spec__, importlib.util.find_spec("transformers"))
        for found in checks:
            assert found is not None
class UpperCAmelCase_ ( unittest.TestCase):
    # Tests for `ContextManagers` (stacking several context managers) and for
    # `find_labels` across torch / TF / Flax model classes.
    # NOTE(review): identifier mangling broke several references here — the
    # greeting context managers defined above are both named `A` (not
    # `context_en`/`context_fr`), the patched-stdout parameter is `__a` while
    # the bodies read `mock_stdout`, `find_labels` is called with the unbound
    # `__a` instead of concrete model classes, and several methods share one
    # name. TODO restore identifiers from upstream.
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def snake_case__ ( self, __a):
        '''With no context managers, the print output is passed through unchanged.'''
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def snake_case__ ( self, __a):
        '''One context manager wraps the output with its enter/exit messages.'''
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def snake_case__ ( self, __a):
        '''Two context managers nest: the first listed is entered first and exited last.'''
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
    @require_torch
    def snake_case__ ( self):
        '''find_labels on PyTorch model classes (and a subclass thereof).'''
        self.assertEqual(find_labels(__a), ["labels"])
        self.assertEqual(find_labels(__a), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(__a), ["start_positions", "end_positions"])
        class UpperCAmelCase_ ( a):
            pass
        self.assertEqual(find_labels(__a), ["labels"])
    @require_tf
    def snake_case__ ( self):
        '''find_labels on TensorFlow model classes (and a subclass thereof).'''
        self.assertEqual(find_labels(__a), ["labels"])
        self.assertEqual(find_labels(__a), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(__a), ["start_positions", "end_positions"])
        class UpperCAmelCase_ ( a):
            pass
        self.assertEqual(find_labels(__a), ["labels"])
    @require_flax
    def snake_case__ ( self):
        '''find_labels on Flax model classes — Flax models take no label arguments.'''
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])
        class UpperCAmelCase_ ( a):
            pass
        self.assertEqual(find_labels(__a), [])
| 356
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazily-exposed public API of the ConvNext sub-package, keyed by submodule.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
_snake_case = _import_structure  # keep the original (mangled) alias bound

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fix: these lists were previously bound to a throwaway name instead of
    # being registered in `_import_structure`, so the vision / torch / tf
    # symbols were never exposed through the lazy module (and
    # `_import_structure` itself was never defined, raising NameError below).
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy over `_import_structure`.
    _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 300
| 0
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    # Nightly GPU integration test for the legacy ONNX Stable Diffusion
    # inpainting pipeline, compared against a stored reference image.
    # NOTE(review): identifier mangling left both properties sharing the name
    # `snake_case__` (the second shadows the first) and the second property's
    # `return options` reads a name its body no longer binds. TODO restore
    # identifiers from upstream.
    @property
    def snake_case__ ( self):
        '''ONNX Runtime CUDA execution-provider spec with a 15 GB arena cap.'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def snake_case__ ( self):
        '''Session options for the GPU run (one flag disabled — presumably `enable_mem_pattern`; verify upstream).'''
        _lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
        _lowerCAmelCase : int = False
        return options
    def snake_case__ ( self):
        '''End-to-end inpainting run on the overture-creations fixture, checked against a reference numpy image.'''
        _lowerCAmelCase : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        _lowerCAmelCase : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        _lowerCAmelCase : List[str] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        _lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=__a)
        _lowerCAmelCase : Any = "A red cat sitting on a park bench"
        _lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
        _lowerCAmelCase : Any = pipe(
            prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
        _lowerCAmelCase : Optional[int] = output.images[0]
        # loose pixel-wise tolerance against the stored reference
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1E-2
| 357
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
# Physical constants for the Casimir relation. The original assignments bound
# both values to one throwaway name, leaving the names the function reads
# undefined; keep the old bindings and add the names the body actually uses.
_snake_case = 1.0_5457_1817e-34 # unit of ℏ : J * s
REDUCED_PLANCK_CONSTANT = _snake_case
_snake_case = 3e8 # unit of c : m * s^-1
SPEED_OF_LIGHT = _snake_case
def A ( force , area , distance ):
    """Solve the Casimir-force relation F = (ℏ·c·π²·A) / (240·d⁴) for the
    one argument that is passed as 0.

    Exactly one of *force*, *area*, *distance* must be 0; the corresponding
    quantity is computed from the other two and returned as a single-entry
    dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or if any argument is
            negative.
    """
    # fix: the three parameters previously shared one mangled name (a
    # SyntaxError) while the body read `force`/`area`/`distance`.
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
    # Unit tests for `CLIPSegProcessor`: save/load round-trips, tokenizer and
    # image-processor delegation, visual prompts and batch decoding.
    # NOTE(review): mangling artifacts remain — `setUp` reads `__a` (unbound
    # in its scope; presumably the vocab list) and the three accessor methods
    # plus `tearDown` all share the name `snake_case__`, so later definitions
    # shadow earlier ones. TODO restore identifiers from upstream.
    def snake_case__ ( self):
        '''Create a temp dir with a tiny BPE vocab/merges and an image-processor config.'''
        _lowerCAmelCase : List[Any] = tempfile.mkdtemp()
        # fmt: off
        _lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        _lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
        _lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        _lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
        _lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(__a) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(__a))
        _lowerCAmelCase : List[str] = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(__a, __a)
    def snake_case__ ( self, **__a):
        '''Load the slow CLIP tokenizer from the temp dir.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
    def snake_case__ ( self, **__a):
        '''Load the fast CLIP tokenizer from the temp dir.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
    def snake_case__ ( self, **__a):
        '''Load the ViT image processor from the temp dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
    def snake_case__ ( self):
        '''Remove the temp dir created in setUp.'''
        shutil.rmtree(self.tmpdirname)
    def snake_case__ ( self):
        '''Return a single random PIL image as test input.'''
        _lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        _lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
        return image_inputs
    def snake_case__ ( self):
        '''Save/load round-trip preserves both slow and fast tokenizers and the image processor.'''
        _lowerCAmelCase : Tuple = self.get_tokenizer()
        _lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
        _lowerCAmelCase : Dict = self.get_image_processor()
        _lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        processor_slow.save_pretrained(self.tmpdirname)
        _lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
        _lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        processor_fast.save_pretrained(self.tmpdirname)
        _lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, __a)
        self.assertIsInstance(processor_fast.tokenizer, __a)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, __a)
        self.assertIsInstance(processor_fast.image_processor, __a)
    def snake_case__ ( self):
        '''from_pretrained honours extra kwargs for both tokenizer and image processor.'''
        _lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        _lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        _lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
        _lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, __a)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, __a)
    def snake_case__ ( self):
        '''Image-only processing matches calling the image processor directly.'''
        _lowerCAmelCase : Any = self.get_image_processor()
        _lowerCAmelCase : Dict = self.get_tokenizer()
        _lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        _lowerCAmelCase : List[str] = self.prepare_image_inputs()
        _lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
        _lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def snake_case__ ( self):
        '''Text-only processing matches calling the tokenizer directly.'''
        _lowerCAmelCase : List[Any] = self.get_image_processor()
        _lowerCAmelCase : Tuple = self.get_tokenizer()
        _lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        _lowerCAmelCase : Union[str, Any] = "lower newer"
        _lowerCAmelCase : List[str] = processor(text=__a)
        _lowerCAmelCase : List[Any] = tokenizer(__a)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def snake_case__ ( self):
        '''Text + image processing yields the combined set of output keys; no input raises.'''
        _lowerCAmelCase : Optional[Any] = self.get_image_processor()
        _lowerCAmelCase : Any = self.get_tokenizer()
        _lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        _lowerCAmelCase : int = "lower newer"
        _lowerCAmelCase : List[Any] = self.prepare_image_inputs()
        _lowerCAmelCase : Any = processor(text=__a, images=__a)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(__a):
            processor()
    def snake_case__ ( self):
        '''Visual-prompt processing yields conditional pixel values alongside the image tensor.'''
        _lowerCAmelCase : Union[str, Any] = self.get_image_processor()
        _lowerCAmelCase : int = self.get_tokenizer()
        _lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        _lowerCAmelCase : Dict = self.prepare_image_inputs()
        _lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
        _lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(__a):
            processor()
    def snake_case__ ( self):
        '''batch_decode delegates to the tokenizer's batch_decode.'''
        _lowerCAmelCase : Dict = self.get_image_processor()
        _lowerCAmelCase : Any = self.get_tokenizer()
        _lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
        _lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _lowerCAmelCase : List[str] = processor.batch_decode(__a)
        _lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
        self.assertListEqual(__a, __a)
| 358
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
    # Arguments that control which wav2vec2 model is loaded and how it is
    # configured for pretraining.
    # NOTE(review): every field below is bound to the same mangled name
    # `lowerCamelCase__`, so only the last assignment survives on the class —
    # presumably these were distinct annotated fields (model_name_or_path,
    # cache_dir, freeze_feature_extractor, verbose_logging,
    # max_gumbel_temperature, min_gumbel_temperature,
    # gumbel_temperature_decay). TODO confirm upstream.
    lowerCamelCase__ = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
    lowerCamelCase__ = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
    lowerCamelCase__ = field(
        default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
    '''Configure root logging and set verbosity from the parsed arguments.

    NOTE(review): mangling broke this function as written — both parameters
    share the name ``_lowerCamelCase`` (a SyntaxError) while the body reads
    ``model_args`` / ``training_args``; ``logger`` is read but the module
    bound its logger to a different mangled name; and the final call passes a
    parameter rather than the computed level. Presumably the original
    signature was ``(model_args, training_args)``. TODO restore upstream.
    '''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    # default to WARNING; DEBUG when verbose, INFO on the main process
    _lowerCAmelCase : Optional[Any] = logging.WARNING
    if model_args.verbose_logging:
        _lowerCAmelCase : Dict = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        _lowerCAmelCase : str = logging.INFO
    logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
    # Arguments that describe the dataset and preprocessing for pretraining.
    # NOTE(review): as above, all fields share one mangled name so only the
    # last assignment survives — presumably (dataset_name,
    # dataset_config_name, train_split_name, validation_split_name,
    # speech_file_column, overwrite_cache, validation_split_percentage,
    # preprocessing_num_workers, max_duration_in_seconds). TODO confirm.
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    lowerCamelCase__ = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    lowerCamelCase__ = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    lowerCamelCase__ = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    lowerCamelCase__ = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    lowerCamelCase__ = field(
        default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    lowerCamelCase__ = field(
        default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
    """Data collator for wav2vec2 pretraining: pads raw audio, builds the padded
    attention mask, and samples random time-mask indices for the contrastive task.

    NOTE(review): fields are all bound to one name (``lowerCamelCase__``) and the
    ``42`` placeholders stand in for the original ``model``/``feature_extractor``
    type annotations — mechanically renamed code; only the last field survives.
    """
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = "longest"
    lowerCamelCase__ = None
    lowerCamelCase__ = None
    def __call__( self, __a):
        '''Collate a list of features into a padded batch with mask-time indices.

        NOTE(review): several results below are assigned to the throwaway local
        ``_lowerCAmelCase`` while later lines read ``batch``, ``batch_size``,
        ``mask_indices_seq_length`` and ``attention_mask`` — those reads are
        unbound here; in the original each assignment targeted the name that is
        read (e.g. ``batch = self.feature_extractor.pad(...)``). Confirm before use.
        '''
        # Pad raw audio to a uniform length and return PyTorch tensors.
        _lowerCAmelCase : Any = self.feature_extractor.pad(
            __a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Sequence length after the feature-extractor convolutions.
        _lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        _lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            _lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            # Fresh zeroed mask in the reduced (post-conv) time resolution.
            _lowerCAmelCase : Dict = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            _lowerCAmelCase : List[str] = 1
            _lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
            # sample randomly masked indices
            _lowerCAmelCase : Optional[Any] = _compute_mask_indices(
                (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
        return batch
class UpperCAmelCase_ ( a):
    """Trainer subclass for wav2vec2 pretraining that decays the gumbel-softmax
    temperature after every update step.

    NOTE(review): ``__init__`` and the training-step method below declare
    several parameters with the *same* name (``__a``) — a SyntaxError in real
    Python — and their bodies read the original names (``max_gumbel_temp``,
    ``model``, ``inputs``, ``loss``) that are never bound. This is mechanically
    renamed code; confirm against the original trainer before running.
    """
    def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
        '''Store gumbel-temperature schedule (max, min, decay) and forward the
        rest to the base Trainer.'''
        super().__init__(*__a, **__a)
        # Running count of optimizer update steps (drives the temperature decay).
        _lowerCAmelCase : Dict = 0
        _lowerCAmelCase : List[str] = max_gumbel_temp
        _lowerCAmelCase : List[Any] = min_gumbel_temp
        _lowerCAmelCase : int = gumbel_temp_decay
    def snake_case__ ( self, __a, __a):
        '''One training step: forward, loss reduction, backward (amp/apex/deepspeed
        aware), then exponentially decay the model's gumbel temperature.'''
        model.train()
        _lowerCAmelCase : str = self._prepare_inputs(__a)
        if self.use_amp:
            # Mixed-precision forward pass.
            with autocast():
                _lowerCAmelCase : Any = self.compute_loss(__a, __a)
        else:
            _lowerCAmelCase : Dict = self.compute_loss(__a, __a)
        if self.args.n_gpu > 1 or self.deepspeed:
            # Multi-GPU/deepspeed: reduce the per-device losses explicitly.
            if model.module.config.ctc_loss_reduction == "mean":
                _lowerCAmelCase : List[str] = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # Normalize the summed loss by the number of masked time steps.
                _lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            _lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(__a).backward()
        elif self.use_apex:
            with amp.scale_loss(__a, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(__a)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def A ( ):
    '''Entry point for wav2vec2 pretraining: parse args, load and preprocess the
    audio dataset, build model/collator/trainer, and train.

    NOTE(review): throughout this function results are assigned to the throwaway
    local ``_lowerCAmelCase`` while later lines read the original names
    (``parser``, ``model_args``, ``data_args``, ``training_args``, ``datasets``,
    ``vectorized_datasets``, ``feature_extractor``, ``config``, ``trainer``) —
    those reads are unbound as written. Likewise ``configure_logger`` and several
    keyword values receive ``_lowerCamelCase``, which is not defined here.
    This is mechanically renamed code; confirm against the original script.
    '''
    _lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
    configure_logger(_lowerCamelCase , _lowerCamelCase )
    # Downloading and loading a dataset from the hub.
    _lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # No validation split: carve one out of the train split by percentage.
        # make sure only "validation" and "train" keys remain"
        _lowerCAmelCase : int = DatasetDict()
        _lowerCAmelCase : Optional[int] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        _lowerCAmelCase : List[str] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        _lowerCAmelCase : List[str] = DatasetDict()
        _lowerCAmelCase : List[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        _lowerCAmelCase : Union[str, Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    _lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
    def prepare_dataset(_lowerCamelCase ):
        # check that all files have the correct sampling rate
        # NOTE(review): reads ``batch`` though the parameter is ``_lowerCamelCase``.
        _lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    _lowerCAmelCase : Dict = datasets.map(
        _lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    # NOTE(review): the lambda reads ``data`` though its parameter is ``_lowerCamelCase``.
    _lowerCAmelCase : Tuple = vectorized_datasets.filter(
        lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(_lowerCamelCase ):
        # NOTE(review): reads ``batch`` though the parameter is ``_lowerCamelCase``.
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    _lowerCAmelCase : Dict = vectorized_datasets.map(
        _lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    _lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    _lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
    _lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
    _lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
        model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    # BUG FIX: the entry point above is defined as ``A`` (the original ``main``
    # was renamed but this call site was not), so ``main()`` raised NameError.
    A()
| 300
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( model , ckpt_dir , model_name ):
    '''Export a PyTorch BertModel's weights as a TensorFlow-1 checkpoint.

    Maps each state-dict key to the TF variable naming scheme, transposes the
    tensors TF stores transposed, creates/initializes the TF variables in a
    session and saves ``<ckpt_dir>/<model_name>.ckpt``.

    BUG FIX: the three parameters shared one name (a SyntaxError) and several
    calls passed the parameter placeholder instead of the intended locals
    (``set_value``/``session.run``/``np.allclose``/``saver.save``); restore the
    original local names so the converted values actually flow through.
    '''
    # Weights TF keeps transposed relative to PyTorch.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (pytorch substring, tf substring) rename rules applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        # Translate a pytorch state-dict key into the TF checkpoint name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"
    def create_tf_var(tensor , name , session ):
        # Declare a zero-initialized TF variable with the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            # Read the value back to verify the round-trip.
            tf_tensor = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( raw_args=None ):
    '''Parse CLI flags and convert the given PyTorch BERT checkpoint to TF.

    BUG FIX: every ``type``/``required``/``default`` keyword previously received
    the (None-valued) function parameter, so no flag was required and no value
    was type-checked; restore the concrete argparse settings.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # BUG FIX: the entry point above is defined as ``A`` (the original ``main``
    # was renamed but this call site was not), so ``main()`` raised NameError.
    A()
| 359
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
    '''Scrape amazon.in search results for *product* into a pandas DataFrame
    (title, link, price, rating, MRP, discount %).

    NOTE(review): this body is mechanically renamed and non-functional as
    written — results are assigned to the throwaway ``_lowerCAmelCase`` while
    later lines read the original names (``product``, ``soup``, ``item``,
    ``product_price``, ``product_mrp``, ``product_title`` …), which are unbound;
    ``requests.get`` receives the product string for both URL and headers; and
    ``item.ha`` is presumably the obfuscated ``item.h2``. ``BeautifulSoup`` is
    also called without an explicit parser. Confirm against the original script.
    '''
    # Search URL for the product keyword.
    _lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
    # Browser-like headers so Amazon serves the normal HTML page.
    _lowerCAmelCase : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    _lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
    # Initialize a Pandas dataframe with the column titles
    _lowerCAmelCase : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            _lowerCAmelCase : Any = item.ha.text
            _lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
            _lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                _lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # No rating element present for this listing.
                _lowerCAmelCase : str = "Not available"
            try:
                _lowerCAmelCase : Optional[Any] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # No list-price (MRP) element present.
                _lowerCAmelCase : Optional[Any] = ""
            try:
                # Discount % = (MRP - price) / MRP * 100.
                _lowerCAmelCase : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                _lowerCAmelCase : Optional[Any] = float("nan" )
        except AttributeError:
            # Listing missing a mandatory element — skip its fields.
            pass
        _lowerCAmelCase : Any = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _lowerCAmelCase : List[str] = " "
        _lowerCAmelCase : Tuple = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    _snake_case = "headphones"
    # BUG FIX: the scraper above is named ``A`` and the search term lives in
    # ``_snake_case``; the previous call referenced the undefined names
    # ``get_amazon_product_data`` and ``product`` and raised NameError.
    A(_snake_case).to_csv(f'''Amazon Product Data for {_snake_case}.csv''')
| 300
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
# Module-level logger and pretty-print column widths used by the helpers below
# (the summary/quantizer functions reference ``logger``, ``name_width`` and
# ``qname_width``).
# BUG FIX: all three values were previously assigned to the single name
# ``_snake_case``, leaving the three names actually read by this module
# undefined (NameError at first use).
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def A ( parser ):
    '''Register quant_trainer's command-line flags on *parser*.

    BUG FIX: several ``add_argument`` calls previously passed
    ``type=_lowerCamelCase`` (the parser object itself) as the type callable —
    any value supplied on the command line would then fail conversion — and
    ``--percentile`` used the parser as its default. Restore concrete
    ``int``/``str``/``float`` types and a ``None`` default.
    '''
    group = parser.add_argument_group("quant_trainer arguments" )
    group.add_argument("--wprec" , type=int , default=8 , help="weight precision" )
    group.add_argument("--aprec" , type=int , default=8 , help="activation precision" )
    group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
    group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
    group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
    group.add_argument("--quant-disable-keyword" , type=str , nargs="+" , help="disable quantizers by keyword" )
    group.add_argument("--quant-disable-layer-module" , type=str , help="disable quantizers by keyword under layer." )
    group.add_argument("--quant-enable-layer-module" , type=str , help="enable quantizers by keyword under layer" )
    group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
    group.add_argument("--percentile" , default=None , type=float , help="percentile for PercentileCalibrator" )
    group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
    group.add_argument("--clip-gelu" , metavar="N" , type=float , help="clip gelu output maximum value to N" )
    group.add_argument(
        "--recalibrate-weights" , action="store_true" , help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ) , )
def A ( args ):
    '''Install default tensor-quantizer descriptors on QuantLinear from CLI args.

    Raises ValueError for an unknown ``args.calibrator`` or when the percentile
    calibrator is selected without ``--percentile``.

    BUG FIX: the two ``QuantDescriptor`` objects were previously assigned to a
    throwaway local and ``args`` itself was passed to both
    ``set_default_quant_desc_*`` setters; pass the descriptors instead.
    '''
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator" )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        # MSE calibration also runs on a histogram of observed activations.
        calib_method = "histogram"
    else:
        raise ValueError(F"Invalid calibrator {args.calibrator}" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    # Per-tensor when requested, otherwise per-output-channel (axis 0).
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def A ( model , args , calib=False , eval_mode=False ):
    '''Configure *model*'s quantizers from CLI args (disable/enable by keyword,
    recalibrate weights, fuse qkv scales, clip GELU) and print a summary.

    ``eval_mode`` is accepted for call-site compatibility and unused here.

    BUG FIX: all four parameters previously shared the name ``_lowerCamelCase``
    (a SyntaxError) while the body read the unbound names ``args``/``calib``;
    the boolean ``_disabled`` flags also received the parameter placeholder —
    restore True/False per the enable/disable intent.
    '''
    logger.info("Configuring Model for Quantization" )
    logger.info(F"using quantization package {pytorch_quantization.__file__}" )
    if not calib:
        # Calibration runs keep quantizers untouched; only apply these edits
        # when configuring for actual quantized training/eval.
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["embeddings"] , which="weight" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def A ( model ):
    '''Switch every tensor quantizer in *model* into calibration mode.

    Quantizers that carry a calibrator collect statistics (quantization off,
    calibration on); quantizers without one are disabled entirely.

    BUG FIX: the parameter was never used and the loop read an undefined global
    ``model``; bind the parameter to the name the body uses.
    '''
    logger.info("Enabling Calibration" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F"{name:80}: {module}" )
def A ( model , args ):
    '''Finish calibration: load the collected amax values into every quantizer,
    re-enable quantization, move the model to GPU, and print a summary.

    Percentile-based calibrators receive ``args.percentile``; max calibrators
    load their amax directly.

    BUG FIX: both parameters previously shared one name (a SyntaxError) while
    the body read the unbound name ``args``; restore distinct parameter names.
    '''
    logger.info("Loading calibrated amax" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def A ( model , args ):
    '''Share one amax scale across the q/k/v input quantizers of each attention
    block (and, with per-tensor quantization, across the q/k/v weight quantizers).

    BUG FIX: both this function's parameters and all three of ``fusea``'s shared
    a single name (a SyntaxError); the f-string below already read ``q``/``k``/
    ``v``/``amax``, so restore those local names.
    '''
    def fusea(qq , qk , qv ):
        # All three quantizers must already hold a calibrated ``_amax`` buffer.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print(" WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        # Fuse to the largest of the three ranges so none of q/k/v clips.
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
    for name, mod in model.named_modules():
        if name.endswith(".attention.self" ):
            logger.info(F"FUSE_QKV: {name:{name_width}}" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def A ( model , maxval ):
    '''Clamp the input-quantizer amax of every FFN ``.output.dense`` layer
    (excluding attention output) to *maxval*, limiting the post-GELU range.

    BUG FIX: both parameters shared one name (a SyntaxError) and the loop read
    an undefined global ``model``; the clamp also received the placeholder —
    restore ``model``/``maxval``.
    '''
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def A ( model ):
    '''Expand each per-axis weight quantizer's amax to one value per output
    channel (``weight.shape[0]``), broadcasting the existing amax.

    BUG FIX: the expanded tensor was previously assigned to a throwaway local
    instead of back onto ``mod._weight_quantizer._amax``, so the expansion (and
    the value printed afterwards) never took effect.
    '''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            out_channels = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(out_channels , dtype=amax.dtype , device=amax.device ) * amax
            print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def A ( model ):
    '''Recompute every weight quantizer's amax from the current weights, at the
    quantizer's current granularity (its ``axis``).

    BUG FIXES: ``mod.weight_quantizer`` was missing the leading underscore, the
    warning string was a plain (non-f) string, ``keepdims`` received the model
    object instead of ``True``, and the recomputed amax was dropped into a
    throwaway local instead of being written back onto the quantizer.
    '''
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
            mod._weight_quantizer._amax = amax
def A ( model , name_width=25 , line_width=180 , ignore=None ):
    '''Log a one-line quantization summary (input + weight quantizer reprs) for
    every weighted module in *model*, skipping types or name substrings in
    *ignore*.

    BUG FIX: the four parameters all shared one name (a SyntaxError) and the
    running column width was assigned to a throwaway local; restore distinct
    names. The column width is recomputed from the longest module name below,
    mirroring the original (the ``name_width`` default only serves as an API
    placeholder).
    '''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    # Width of the name column = longest weighted-module name.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        # Skip modules whose name contains any ignored substring.
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F"Act:{input_q.extra_repr()}"
        wgt_str = F"Wgt:{weight_q.extra_repr()}"
        line = F"{name:{name_width}} {act_str} {wgt_str}"
        if len(line ) <= line_width:
            logger.info(line )
        else:
            # Too wide for one line: split act/weight across two lines.
            logger.info(F"{name:{name_width}} {act_str}" )
            logger.info(F"{' ':{name_width}} {wgt_str}" )
def A ( model ):
    '''Print every TensorQuantizer module in *model* and the total count.

    BUG FIX: the parameter was never used and the loop read an undefined global
    ``model``; bind the parameter to the name the body uses.
    '''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F"{name:80} {mod}" )
            count += 1
    print(F"{count} TensorQuantizers found in model" )
def A ( name , mod , quantizer , k , v ):
    '''Set attribute *k* to *v* on ``mod.<quantizer>`` (e.g. ``_input_quantizer``),
    or warn when *mod* has no such quantizer. *name* is used only for logging.

    BUG FIX: all five parameters previously shared one name — a SyntaxError in
    Python — making the function unusable; restore distinct parameter names.
    '''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        # The attribute must already exist on the quantizer; catch typos early.
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"{name} has no {quantizer}" )
def A ( name , mod , which="both" , **kwargs ):
    '''Apply keyword attribute settings to *mod*'s input and/or weight quantizer
    (``which`` in {"input", "weight", "both"}) and log what changed.

    BUG FIX: the positional parameters previously shared one name (a
    SyntaxError); restore ``name``/``mod``/``which``.

    NOTE(review): as in the upstream original, only the *last* kwarg pair is
    forwarded to ``set_quantizer`` — ``k``/``v`` are the loop variables read
    after the loop finishes.
    '''
    s = F"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += F" {k}={v}"
    if which in ["input", "both"]:
        set_quantizer(name , mod , "_input_quantizer" , k , v )
    if which in ["weight", "both"]:
        set_quantizer(name , mod , "_weight_quantizer" , k , v )
    logger.info(s )
def A ( model , names , **kwargs ):
    '''Apply quantizer attribute settings to every module of *model* whose name
    matches any regex in *names*: modules holding quantizers go through
    ``set_quantizers``; bare ``*_quantizer`` modules get the attributes set
    directly.

    BUG FIX: both positional parameters previously shared one name (a
    SyntaxError) and the regex/name arguments inside were collapsed to that
    same placeholder; restore ``model``/``names`` and the loop variables.

    NOTE(review): the direct ``setattr`` below sits after the kwargs loop and
    therefore applies only the last kwarg pair — kept as-is from the source.
    '''
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = F"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += F" {k}={v}"
                    setattr(mod , k , v )
                    logger.info(s )
| 360
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A ( model , ckpt_dir , model_name ):
    '''Export a PyTorch BertModel's weights as a TensorFlow-1 checkpoint.

    Maps each state-dict key to the TF variable naming scheme, transposes the
    tensors TF stores transposed, creates/initializes the TF variables in a
    session and saves ``<ckpt_dir>/<model_name>.ckpt``.

    BUG FIX: the three parameters shared one name (a SyntaxError) and several
    calls passed the parameter placeholder instead of the intended locals
    (``set_value``/``session.run``/``np.allclose``/``saver.save``); restore the
    original local names so the converted values actually flow through.
    '''
    # Weights TF keeps transposed relative to PyTorch.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (pytorch substring, tf substring) rename rules applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        # Translate a pytorch state-dict key into the TF checkpoint name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"
    def create_tf_var(tensor , name , session ):
        # Declare a zero-initialized TF variable with the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            # Read the value back to verify the round-trip.
            tf_tensor = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def A ( raw_args=None ):
    '''Parse CLI flags and convert the given PyTorch BERT checkpoint to TF.

    BUG FIX: every ``type``/``required``/``default`` keyword previously received
    the (None-valued) function parameter, so no flag was required and no value
    was type-checked; restore the concrete argparse settings.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # BUG FIX: the entry point above is defined as ``A`` (the original ``main``
    # was renamed but this call site was not), so ``main()`` raised NameError.
    A()
| 300
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazily-loaded public API for the ConvNext model family: the import table is
# built conditionally on which optional backends are installed, then handed to
# ``_LazyModule`` so heavy submodules import on first attribute access.
# BUG FIX: every piece of the import table was previously assigned to the same
# name ``_snake_case``, so the ``_import_structure`` consumed at the bottom was
# never defined (NameError on import) and the lazy module was never installed
# in ``sys.modules``.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys
    # Replace this module with the lazy proxy so attribute access triggers the
    # real submodule imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 361
|
class UpperCAmelCase_ :
    """Undirected weighted graph stored as a nested adjacency dict
    (``adjacency[head][tail] = weight``), with helpers to add vertices/edges,
    make edge weights distinct, and build a graph from vertex/edge lists.

    NOTE(review): this class is mechanically renamed and non-functional as
    written — all methods share the name ``snake_case__`` (so only the last
    definition survives), the 3-arg method declares ``__a`` three times (a
    SyntaxError), assignments target the throwaway ``_lowerCAmelCase`` while
    later lines read the original names (``edges``, ``string``, ``output``,
    ``g``, ``vertex`` …), the sort lambda reads an unbound ``e``, and ``build``
    references an undefined ``Graph``. Confirm against the original source.
    """
    def __init__( self):
        '''Initialize an empty graph (vertex/edge counters and adjacency map).'''
        _lowerCAmelCase : Dict = 0
        _lowerCAmelCase : Optional[int] = 0
        _lowerCAmelCase : Tuple = {}
    def snake_case__ ( self, __a):
        '''Add a vertex if not already present (reads unbound ``vertex``).'''
        if vertex not in self.adjacency:
            _lowerCAmelCase : List[Any] = {}
            self.num_vertices += 1
    def snake_case__ ( self, __a, __a, __a):
        '''Add an undirected weighted edge; self-loops are ignored.
        NOTE(review): duplicate ``__a`` parameters — SyntaxError as written.'''
        self.add_vertex(__a)
        self.add_vertex(__a)
        if head == tail:
            return
        # Presumably ``self.adjacency[head][tail] = weight`` and the mirror
        # assignment in the original.
        _lowerCAmelCase : Dict = weight
        _lowerCAmelCase : Dict = weight
    def snake_case__ ( self):
        '''Make all edge weights pairwise distinct by bumping equal/descending
        neighbors after sorting by weight, then write the weights back.'''
        _lowerCAmelCase : Any = self.get_edges()
        # Drop the mirrored (tail, head) duplicates of each undirected edge.
        for edge in edges:
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
            edges.remove((tail, head, weight))
        for i in range(len(__a)):
            _lowerCAmelCase : Optional[int] = list(edges[i])
        # NOTE(review): the lambda reads an unbound ``e``; original was
        # presumably ``key=lambda e: e[2]``.
        edges.sort(key=lambda __a: e[2])
        for i in range(len(__a) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                _lowerCAmelCase : Tuple = edges[i][2] + 1
        for edge in edges:
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
            _lowerCAmelCase : Union[str, Any] = weight
            _lowerCAmelCase : Optional[int] = weight
    def __str__( self):
        '''Render the graph as ``head -> tail == weight`` lines.'''
        _lowerCAmelCase : Optional[int] = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                _lowerCAmelCase : List[Any] = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def snake_case__ ( self):
        '''Return all directed (tail, head, weight) triples.'''
        _lowerCAmelCase : int = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def snake_case__ ( self):
        '''Return a view of the graph's vertices.'''
        return self.adjacency.keys()
    @staticmethod
    def snake_case__ ( __a=None, __a=None):
        '''Build a graph from optional vertex and edge lists.
        NOTE(review): duplicate ``__a`` parameters — SyntaxError as written —
        and ``Graph()`` references a name not defined in this file.'''
        _lowerCAmelCase : Optional[Any] = Graph()
        if vertices is None:
            _lowerCAmelCase : Any = []
        if edges is None:
            _lowerCAmelCase : Any = []
        for vertex in vertices:
            g.add_vertex(__a)
        for edge in edges:
            g.add_edge(*__a)
        return g
class UpperCAmelCase_ :
    """Disjoint-set (union-find with path compression and union by rank) plus a
    static Borůvka minimum-spanning-tree routine.

    NOTE(review): mechanically renamed and non-functional as written — the
    three item methods all share the name ``snake_case__`` (only the last
    survives), ``union`` and the MST routine declare duplicate ``__a``
    parameters (a SyntaxError), the obfuscation collapsed ``root1``/``root2``
    (and ``set1``/``set2``) into the single names ``roota``/``seta`` so the
    comparisons are degenerate, assignments target the throwaway
    ``_lowerCAmelCase`` while later lines read the original names, and
    ``Graph.UnionFind``/``Graph.build`` reference an undefined ``Graph``.
    Confirm against the original source before use.
    """
    def __init__( self):
        '''Initialize empty parent and rank tables.'''
        _lowerCAmelCase : Dict = {}
        _lowerCAmelCase : List[Any] = {}
    def __len__( self):
        '''Number of items tracked by the structure.'''
        return len(self.parent)
    def snake_case__ ( self, __a):
        '''make_set: register *item* as its own root with rank 0 (idempotent —
        an existing item is just found).'''
        if item in self.parent:
            return self.find(__a)
        _lowerCAmelCase : Optional[int] = item
        _lowerCAmelCase : Any = 0
        return item
    def snake_case__ ( self, __a):
        '''find: return *item*'s root, creating the set on first sight and
        compressing the path on the way up.'''
        if item not in self.parent:
            return self.make_set(__a)
        if item != self.parent[item]:
            _lowerCAmelCase : Any = self.find(self.parent[item])
        return self.parent[item]
    def snake_case__ ( self, __a, __a):
        '''union by rank: merge the two items' sets and return the new root.
        NOTE(review): duplicate ``__a`` parameters, and ``roota == roota`` is
        always true as written — originally two distinct roots were compared.'''
        _lowerCAmelCase : List[Any] = self.find(__a)
        _lowerCAmelCase : List[str] = self.find(__a)
        if roota == roota:
            return roota
        if self.rank[roota] > self.rank[roota]:
            _lowerCAmelCase : Any = roota
            return roota
        if self.rank[roota] < self.rank[roota]:
            _lowerCAmelCase : List[Any] = roota
            return roota
        if self.rank[roota] == self.rank[roota]:
            self.rank[roota] += 1
            _lowerCAmelCase : int = roota
            return roota
        return None
    @staticmethod
    def snake_case__ ( __a):
        '''Borůvka's MST: repeatedly pick each component's cheapest outgoing
        edge and union the endpoints until one component remains, then build
        the MST graph from the collected edges.'''
        _lowerCAmelCase : Tuple = graph.num_vertices
        _lowerCAmelCase : Optional[int] = Graph.UnionFind()
        _lowerCAmelCase : str = []
        while num_components > 1:
            # cheapest outgoing edge per component root this round
            _lowerCAmelCase : List[str] = {}
            for vertex in graph.get_vertices():
                _lowerCAmelCase : Optional[Any] = -1
            _lowerCAmelCase : Union[str, Any] = graph.get_edges()
            # Drop the mirrored (tail, head) duplicates of each undirected edge.
            for edge in edges:
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
                _lowerCAmelCase : Dict = union_find.find(__a)
                _lowerCAmelCase : Optional[Any] = union_find.find(__a)
                if seta != seta:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Union[str, Any] = [head, tail, weight]
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        _lowerCAmelCase : Tuple = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
                    if union_find.find(__a) != union_find.find(__a):
                        union_find.union(__a, __a)
                        mst_edges.append(cheap_edge[vertex])
                        _lowerCAmelCase : Any = num_components - 1
        _lowerCAmelCase : List[str] = Graph.build(edges=__a)
        return mst
| 300
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.