"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        mask_inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**mask_inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator,
            negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: returns True if `pattern` occurs in `text`.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
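
    # Worked example (added for illustration; not part of the original test set):
    # each entry of the failure array is the length of the longest proper prefix
    # of the pattern that is also a suffix ending at that index, which is where
    # the search resumes after a mismatch. For "ABABX" that gives:
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]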
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention, position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm",
        hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
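

# Quick usage sketch (added for illustration; not part of the original module):
# the property multiplies the conv_stride entries together, so with the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) one output frame of the
# feature extractor covers 5 * 2**6 = 320 input audio samples.
if __name__ == "__main__":
    config = SEWDConfig()
    assert config.inputs_to_logits_ratio == 320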
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm: runs one search from the source and one
    from the destination over the reversed graph, and stops once the frontiers meet.

    Returns the length of the shortest path, or -1 if the destination is unreachable.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
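
    # Minimal usage sketch (added for illustration): graph_bwd is graph_fwd with
    # every edge reversed, which is what the backward half of the search expects.
    # Here E -> G -> F (cost 2 + 1) beats E -> B -> C -> D -> F (cost 4), so:
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3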
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level tokenizers,
        # so this check is skipped here (mostly an issue of adding a space before the string).
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(sa)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self, *args, **kwargs):
        pass
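
# A minimal usage sketch of `truncate_before_pattern` outside the test harness
# (assumes the Salesforce/codegen-350M-mono checkpoint is downloadable; the output
# text is illustrative):
#
#   from transformers import CodeGenTokenizer
#
#   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok.encode("def f(x):\n    return x\n\n\n\n# trailing comment")
#   # decoding stops at the first regex match, here the run of blank lines
#   print(tok.decode(ids, truncate_before_pattern=["\n\n\n"]))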
| 294
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, unblocked neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back from `node` to rebuild the path from the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
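
# The bidirectional variant below runs one BFS from the start and one from the goal in
# lockstep and stops when the two frontiers meet: for branching factor b and solution
# depth d it explores on the order of 2 * b^(d/2) nodes instead of b^d.
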
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # retarget each search towards the other frontier's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 152
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
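    # Worked example for `nums` above (allow_empty_subarrays=False): the running
    # (curr_sum, max_sum) pairs evolve as (-2,-2), (1,1), (-2,1), (4,4), (3,4), (5,5),
    # (6,6), (1,6), (5,6), so the printed result is 6, from the subarray [4, -1, 2, 1].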
| 177
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
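    # Example: patience_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]. Each element is
    # placed on the leftmost stack whose top is >= the element (located with
    # bisect_left over the ordered stack tops), and the decreasing stacks are then
    # heap-merged, for O(n log n) overall.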
| 360
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print from concurrent processes without interleaving, via an exclusive lock on this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
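
# Healthy output is one "[hostname-local_rank] is OK (global rank: r/world_size)" line
# per process, plus a single pt/cuda/nccl version line from global rank 0. A hang in
# dist.barrier() or an exception from all_reduce usually points at NCCL or network
# misconfiguration; rerun with NCCL_DEBUG=INFO (see the header above) to investigate.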
| 258
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape movie titles and IMDb ratings from the Top 250 chart."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
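    # Note: this scraper targets the 2021-era IMDb chart markup; if IMDb changes its
    # HTML, the "titleColumn" / "ratingColumn imdbRating" cells will match nothing and
    # the resulting CSV will contain only the header row.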
| 167
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a minimal Accelerate config for the current machine to `save_location`."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
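
# Illustrative result: on a single-GPU machine, write_basic_config("fp16") produces a
# JSON file along the lines of (not an exhaustive ClusterConfig dump):
#   {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "fp16",
#    "num_processes": 1, "use_cpu": false, "distributed_type": "NO"}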
| 167
| 1
|
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if anything is left in it
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
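
# A minimal consumption sketch for TextIteratorStreamer (checkpoint and prompt are
# illustrative; generation must run in a background thread so the main thread can
# drain the queue):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
#   generated_text = "".join(chunk for chunk in streamer)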
| 360
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
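# fmt: on

# NON_SPEECH_TOKENS and NON_SPEECH_TOKENS_MULTI appear to be the "non-speech" token
# ids (punctuation and sound-event markers) that Whisper suppresses during generation
# via its `suppress_tokens` machinery, for the English-only and the multilingual
# vocabularies respectively.
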
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1,
        seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework,
            sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 338
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new",
        decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1,
        tie_word_embeddings=False, is_decoder=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "pix2struct_vision_model"

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
        num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0,
        attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 7
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase_ = "true"
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
A__ = RegressionModel()
A__ = deepcopy(SCREAMING_SNAKE_CASE__ )
A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
model.to(accelerator.device )
A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model, ddp_model, dataloader
def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
A__ = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ):
A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
with accelerator.main_process_first():
A__ = dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
A__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str:
'''simple docstring'''
A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ )
A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches )
A__ = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
'''simple docstring'''
A__ = []
for batch in dataloader:
A__ , A__ = batch.values()
with torch.no_grad():
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ , A__ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A__ , A__ = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE__ )
targs.append(SCREAMING_SNAKE_CASE__ )
A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ )
return logits, targs
def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]:
'''simple docstring'''
A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert (
len(SCREAMING_SNAKE_CASE__ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}'
def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str:
'''simple docstring'''
A__ = evaluate.load('glue' , 'mrpc' )
A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# First do baseline
A__ , A__ , A__ = setup['no']
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE__ )
with torch.inference_mode():
A__ = model(**SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] )
A__ = metric.compute()
# Then do distributed
A__ , A__ , A__ = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
A__ = model(**SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits.argmax(dim=-1 )
A__ = batch['labels']
A__ , A__ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ )
A__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def _snake_case( ) -> Optional[Any]:
'''simple docstring'''
A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
A__ = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 )
accelerator.state._reset_state()
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
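
# The API exercised above is `accelerator.gather_for_metrics(...)`: it all-gathers
# tensors across processes and drops the samples that were duplicated to pad the last
# uneven batch, which is why the distributed metrics are expected to match the
# single-process baseline exactly.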
| 7
| 1
|
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap over Node objects, with an index map for O(1) node lookups."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
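
# decrease_key runs in O(log n): idx_of_element gives O(1) lookup of the node's slot
# in the heap array, the value is lowered in place, and sift_up restores the heap
# property from that slot.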
SCREAMING_SNAKE_CASE__ = Node("R", -1)
SCREAMING_SNAKE_CASE__ = Node("B", 6)
SCREAMING_SNAKE_CASE__ = Node("A", 3)
SCREAMING_SNAKE_CASE__ = Node("X", 1)
SCREAMING_SNAKE_CASE__ = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
SCREAMING_SNAKE_CASE__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,(
snake_case
) ,
) = config_and_inputs
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : str = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : str = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')

        tokenizer.padding_side = 'left'

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences, return_tensors='pt', padding=True)
        input_ids = inputs['input_ids'].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs['attention_mask'].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_lm_head_model(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is', return_tensors='pt').to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
            ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
            ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
            ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
            ' more than 800,000 deaths.'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = 'cpu'

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy'
        )

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png'
        )
        prompt = 'A red cartoon frog, 4k'

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt='',
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type='np',
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
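# Note on the two-stage flow above: the prior pipeline maps the text prompt to
# CLIP image embeddings (image_emb for the prompt, zero_image_emb for the empty
# negative prompt), and the decoder pipeline turns those embeddings plus the
# init image into pixels -- only the decoder ever sees the image itself.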
from __future__ import annotations

Vector = tuple[float, float, float]
Point = tuple[float, float, float]


def create_vector(end_pointa: Point, end_pointb: Point) -> Vector:
    """simple docstring"""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector, ac: Vector) -> Vector:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector, accuracy: int) -> bool:
    """simple docstring"""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point, b: Point, c: Point, accuracy: int = 10) -> bool:
    """simple docstring"""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
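
if __name__ == "__main__":
    # Usage sketch (hypothetical points): three points on the x-axis are
    # collinear, since the cross product of AB and AC collapses to the zero
    # vector; moving the third point off the axis breaks collinearity.
    assert are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0))
    assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))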
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''

    def process(self, sample: float) -> float:
        '''simple docstring'''
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
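
if __name__ == "__main__":
    # Demo sketch (hypothetical filter, not part of this module): an all-pass
    # "filter" that returns each sample unchanged, whose impulse response is
    # the impulse itself -- the magnitude plot is therefore a flat 0 dB line.
    class _IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(_IdentityFilter(), 48000)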
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        '''simple docstring'''
        self.connections = {}

    def add_node(self, node: str) -> None:
        '''simple docstring'''
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodeb: str, probability: float) -> None:
        '''simple docstring'''
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self) -> list[str]:
        '''simple docstring'''
        return list(self.connections)

    def transition(self, node: str) -> str:
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
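    # Usage sketch (hypothetical transition table): a two-state chain that
    # always flips between 'a' and 'b'; `get_transitions` returns a Counter
    # of node visits over the requested number of steps.
    print(get_transitions('a', [('a', 'b', 1.0), ('b', 'a', 1.0)], 4))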
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
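
# Cross-check sketch (optional third-party dependency, not used by this
# module): python-dateutil implements the same Gregorian computus, so
#     from dateutil import easter
#     easter.easter(2023)  # -> datetime.date(2023, 4, 9)
# should agree with gauss_easter(2023).date().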
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
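
# Usage sketch (hypothetical keys): HashMap mirrors the dict mapping protocol
# that the operation lists above exercise through getitem/setitem/delitem.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    assert hm["key_a"] == "val_a"
    del hm["key_a"]
    assert "key_a" not in hm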
"""simple docstring"""
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph, start, goal):
    """simple docstring"""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    """simple docstring"""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
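
# Note: both functions are plain breadth-first searches, so each runs in
# O(V + E) time; because BFS expands nodes level by level, the first path
# that reaches the goal is a shortest one, which is why `bfs_shortest_path`
# can return as soon as a neighbour equals the goal.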
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
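
# Cross-check sketch (plain numpy, using the docstring example above): the
# diagonal computed in `_compute` is the *squared* Mahalanobis distance
#     d^2 = (x - mu) @ inv(cov) @ (x - mu).T
# e.g. X = [[0, 1]] against [[0, 1], [1, 0]] gives d^2 = 0.5, matching the
# {'mahalanobis': array([0.5])} shown in _KWARGS_DESCRIPTION.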
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
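
# Usage sketch (textbook RSA without padding -- an illustration, not part of
# this module): a message integer m < n round-trips through the generated
# keys, because d is the modular inverse of e modulo (p - 1) * (q - 1).
#
#     public_key, private_key = generate_key(1024)
#     n, e = public_key
#     _, d = private_key
#     ciphertext = pow(1234567, e, n)
#     assert pow(ciphertext, d, n) == 1234567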
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "feature_size"))
self.assertTrue(hasattr(UpperCamelCase__ , "sampling_rate"))
self.assertTrue(hasattr(UpperCamelCase__ , "padding_value"))
    def test_batch_feature(self):
'''simple docstring'''
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: int = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(UpperCamelCase__) == len(UpperCamelCase__) for x, y in zip(UpperCamelCase__ , processed_features[input_name])))
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
    def test_batch_feature_pt(self):
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase: str = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
    def test_batch_feature_tf(self):
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
__lowerCAmelCase: int = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, UpperCamelCase__=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_a, input_b):
            if len(input_a) != len(input_b):
                return False

            for input_slice_a, input_slice_b in zip(input_a, input_b):
                if not np.allclose(np.asarray(input_slice_a), np.asarray(input_slice_b), atol=1e-3):
                    return False
            return True
__lowerCAmelCase: List[str] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[int] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase: str = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase: Tuple = self.feat_extract_tester.min_seq_length
__lowerCAmelCase: Union[str, Any] = self.feat_extract_tester.batch_size
__lowerCAmelCase: Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase: Optional[int] = feat_extract.pad(UpperCamelCase__ , padding=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest")
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[-1]))
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
__lowerCAmelCase: Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="np")
__lowerCAmelCase: Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , pad_to_multiple_of=1_0)
__lowerCAmelCase: Tuple = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="longest" , pad_to_multiple_of=1_0)
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__)
__lowerCAmelCase: str = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: List[str] = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase__) % 1_0 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
__lowerCAmelCase: Optional[Any] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(UpperCamelCase__) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__lowerCAmelCase: Any = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
    def _check_truncation(self, UpperCamelCase__=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_a, input_b):
            if len(input_a) != len(input_b):
                return False

            for input_slice_a, input_slice_b in zip(input_a, input_b):
                if not np.allclose(np.asarray(input_slice_a), np.asarray(input_slice_b), atol=1e-3):
                    return False
            return True
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__lowerCAmelCase: int = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Any = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]))
__lowerCAmelCase: Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to smallest with np
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=UpperCamelCase__ , )
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to middle
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
__lowerCAmelCase: str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Tuple = 1_2
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , )
__lowerCAmelCase: str = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase: Optional[Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase: Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
    def test_padding_from_list(self):
        '''simple docstring'''
        self._check_padding(False)

    def test_padding_from_array(self):
        '''simple docstring'''
        self._check_padding(True)

    def test_truncation_from_list(self):
        '''simple docstring'''
        self._check_truncation(False)

    def test_truncation_from_array(self):
        '''simple docstring'''
        self._check_truncation(True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
'''simple docstring'''
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: List[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
    def test_attention_mask(self):
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: Any = True
__lowerCAmelCase: str = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Tuple = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , UpperCamelCase__)
    def test_attention_mask_with_truncation(self):
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: str = True
__lowerCAmelCase: int = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: int = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: List[str] = min(UpperCamelCase__)
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    imagea = Image.open(dataset[4]["file"])
    imageb = Image.open(dataset[5]["file"])

    images = [imagea, imageb]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
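
# Note: unlike most image processors, ImageGPTImageProcessor returns
# `input_ids` rather than `pixel_values` -- every pixel is quantized to the
# id of its nearest color cluster, so a 32x32 input becomes a 1024-token
# sequence (hence the (1, 1024) shapes asserted above).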
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')


class DoubleLinkedListNode(Generic[T, U]):
    """simple docstring"""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f'Node: key: {self.key}, val: {self.val}, '
            f'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
        )


class DoubleLinkedList(Generic[T, U]):
    """simple docstring"""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """simple docstring"""

    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
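    # Usage sketch (hypothetical function): `LRUCache.decorator` memoizes on
    # the first positional argument and exposes the cache via `cache_info()`.
    @LRUCache.decorator(100)
    def fib(num):
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))           # 832040, recursive calls served from the cache
    print(fib.cache_info())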
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
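
# Illustration of the extraction step: on a docstring fragment such as
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)",
# `_re_checkpoint.findall(...)` yields
# [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")],
# which `get_checkpoint_from_config_class` then validates name-against-link.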
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
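# --- Usage sketch (not part of the original module) ---
# How the task classes above are typically driven; "path/to/data_dir" is a hypothetical
# placeholder for a directory holding {train,dev,test}.txt in CoNLL format, and Split.train
# is assumed to be the train member of the Split enum imported from utils_ner.
def _demo_read_ner_examples(data_dir: str = "path/to/data_dir") -> None:
    task = NER()  # the last column of each line holds the NER tag
    examples = task.read_examples_from_file(data_dir, Split.train)
    for example in examples[:1]:
        print(example.guid, list(zip(example.words, example.labels))[:5])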
| 294
| 0
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right

from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dumps a decoded and a tokenized view of one batch to disk."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
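# --- Illustrative sketch (not part of the original script) ---
# A minimal, self-contained version of the label-smoothed NLL loss that _step delegates to
# via utils.label_smoothed_nll_loss. The library implementation is not shown in this file,
# so this follows the standard fairseq-style formulation; treat the name and signature as
# assumptions rather than the verified upstream code.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) gold token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.unsqueeze(-1).eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)  # smoothing mass spread uniformly over the vocabulary
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss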
| 130
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the expected (height, width) after resizing with a shortest_edge rule
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
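# --- Illustrative sketch (not part of the original test) ---
# The aspect-ratio-preserving resize rule that get_expected_values mirrors for a
# {"shortest_edge": ...} size dict; the "longest_edge" cap is deliberately omitted here.
def _expected_resize(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge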
| 130
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
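# --- Illustrative sketch (not part of the original test) ---
# The hint preprocessing from the integration test above, factored out: a PIL depth image
# becomes a (1, 3, H, W) float tensor scaled to [0, 1], which is what the controlnet
# pipeline expects as `hint`.
def _prepare_hint(pil_image):
    hint = torch.from_numpy(np.array(pil_image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)  # HWC -> 1CHW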
| 290
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
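# --- Illustrative sketch (not part of the original test) ---
# The per-example normalization that _check_zero_mean_unit_variance asserts, written out
# explicitly; the eps value is an assumption for numerical stability, not the verified
# library constant.
def _zero_mean_unit_var(x, eps=1e-7):
    x = np.asarray(x, dtype=np.float32)
    return (x - x.mean()) / np.sqrt(x.var() + eps)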
| 290
| 1
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
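# Example invocation (hypothetical: the flag names are assumptions based on the
# InitializationArguments dataclass, which lives in the local arguments module and is
# not shown here):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model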
| 159
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
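# Usage sketch (not part of the original module): the defaults above yield a
# full-precision model; quant_mode=True switches I-BERT modules to integer-only mode.
#   config = IBertConfig()                   # quant_mode=False, force_dequant="none"
#   int_only = IBertConfig(quant_mode=True)  # integer-only inference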
| 159
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
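# Usage sketch (not part of the generated module): after BuildTopDescriptorsAndMessages
# runs, the module namespace exposes the ModelProto message class, so a serialized
# SentencePiece model can be inspected like this (the path is hypothetical):
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, m.trainer_spec.model_type)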
| 330
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` if it is a lowercase ASCII letter."""
    if not sentence:
        return ""

    # map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
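# Examples of the behavior of capitalize above: only a lowercase ASCII first character
# is changed; everything else is passed through untouched.
#   capitalize("hello world")  ->  "Hello world"
#   capitalize("123 hello")    ->  "123 hello"
#   capitalize("")             ->  ""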
| 330
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
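# Note on the lazy-import pattern above (explanatory, not part of the original module):
# replacing sys.modules[__name__] with a _LazyModule means the submodules listed in
# _import_structure are only imported when an attribute is first accessed, e.g.
#   from transformers.models.maskformer import MaskFormerConfig  # triggers the real import
# which keeps `import transformers` fast even when many optional backends are installed.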
| 343
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def a__ ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
_a = (ConvNextBackbone,) if is_torch_available() else ()
_a = ConvNextConfig
_a = False
def a__ ( self ) -> List[str]:
        self.model_tester = ConvNextModelTester(self)
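# Added note (not in the original test file): ConvNext's stem downsamples by 4 and
# each later stage halves the resolution, which is why the tests above expect a
# first hidden state of size image_size // 4 and a last hidden state of
# image_size // 32. A small sketch of that arithmetic:
def convnext_stage_resolutions(image_size: int, num_stages: int = 4) -> list:
    sizes, current = [], image_size // 4
    for _ in range(num_stages):
        sizes.append(current)
        current //= 2
    return sizes


assert convnext_stage_resolutions(32) == [8, 4, 2, 1]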
| 343
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    """Move the value stored under `old` to the key `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Rename the timm-style backbone keys to the HuggingFace layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize so the longest side matches the size the original model was trained with."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert to a tensor and apply the standard ImageNet normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original checkpoint's weights into our Table Transformer structure."""
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A : Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
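# Added illustration (not part of the conversion script): `read_in_q_k_v` above
# relies on PyTorch's MultiheadAttention storing query/key/value as one fused
# `in_proj` matrix of shape (3 * hidden, hidden). A minimal sketch of that slicing
# with a hypothetical hidden size of 256:
hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
q_weight = in_proj_weight[:hidden, :]  # rows 0..255 -> query projection
k_weight = in_proj_weight[hidden : 2 * hidden, :]  # rows 256..511 -> key projection
v_weight = in_proj_weight[-hidden:, :]  # rows 512..767 -> value projection
assert q_weight.shape == k_weight.shape == v_weight.shape == (hidden, hidden)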
| 154
|
'''simple docstring'''
import os
def lowerCamelCase ( UpperCAmelCase__ : str = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(UpperCAmelCase__ ) , UpperCAmelCase__ ) ) as input_file:
lowercase_ : str = [
[int(UpperCAmelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
lowercase_ : Optional[Any] = len(UpperCAmelCase__ )
lowercase_ : Any = len(matrix[0] )
lowercase_ : Union[str, Any] = [[-1 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
lowercase_ : int = matrix[i][0]
for j in range(1 , UpperCAmelCase__ ):
for i in range(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCAmelCase__ ):
lowercase_ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase_ : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239
| 0
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = AudioLDMPipeline
_a = TEXT_TO_AUDIO_PARAMS
_a = TEXT_TO_AUDIO_BATCH_PARAMS
_a = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def a__ ( self ) -> Any:
torch.manual_seed(0 )
_A : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_a , )
_A : Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_A : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_A : Any = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_A : Optional[int] = ClapTextModelWithProjection(_a )
_A : Union[str, Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_A : int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_a , )
_A : Optional[int] = SpeechTaHifiGan(_a )
_A : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a__ ( self , _a , _a=0 ) -> Optional[Any]:
if str(_a ).startswith("""mps""" ):
_A : int = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : str = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a__ ( self ) -> Tuple:
_A : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : List[Any] = AudioLDMPipeline(**_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = audioldm_pipe(**_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ) -> Optional[int]:
_A : List[str] = self.get_dummy_components()
_A : int = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
_A : Any = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = self.get_dummy_inputs(_a )
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Tuple = audioldm_pipe(**_a )
_A : Optional[int] = output.audios[0]
_A : Union[str, Any] = self.get_dummy_inputs(_a )
_A : Optional[int] = 3 * [inputs.pop("""prompt""" )]
_A : Tuple = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : List[Any] = text_inputs["""input_ids"""].to(_a )
_A : Dict = audioldm_pipe.text_encoder(
_a , )
_A : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : Optional[int] = F.normalize(_a , dim=-1 )
_A : List[Any] = prompt_embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ) -> List[str]:
_A : int = self.get_dummy_components()
_A : Union[str, Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
_A : Optional[int] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : str = 3 * ["""this is a negative prompt"""]
_A : List[str] = negative_prompt
_A : Any = 3 * [inputs["""prompt"""]]
# forward
_A : Union[str, Any] = audioldm_pipe(**_a )
_A : Union[str, Any] = output.audios[0]
_A : str = self.get_dummy_inputs(_a )
_A : Dict = 3 * [inputs.pop("""prompt""" )]
_A : Any = []
for p in [prompt, negative_prompt]:
_A : Optional[int] = audioldm_pipe.tokenizer(
_a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_a , return_tensors="""pt""" , )
_A : Dict = text_inputs["""input_ids"""].to(_a )
_A : str = audioldm_pipe.text_encoder(
_a , )
_A : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_A : int = F.normalize(_a , dim=-1 )
embeds.append(_a )
_A , _A : str = embeds
# forward
_A : Dict = audioldm_pipe(**_a )
_A : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self ) -> Any:
_A : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Tuple = self.get_dummy_components()
_A : Tuple = PNDMScheduler(skip_prk_steps=_a )
_A : List[Any] = AudioLDMPipeline(**_a )
_A : int = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : List[str] = """egg cracking"""
_A : Dict = audioldm_pipe(**_a , negative_prompt=_a )
_A : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_a ) == 256
_A : Tuple = audio[:10]
_A : Union[str, Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self ) -> List[str]:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : List[str] = self.get_dummy_components()
_A : Any = PNDMScheduler(skip_prk_steps=_a )
_A : Tuple = AudioLDMPipeline(**_a )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_A : Tuple = audioldm_pipe(_a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_A : int = 2
_A : str = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_A : Union[str, Any] = 2
_A : str = audioldm_pipe(_a , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_A : List[Any] = 2
_A : Optional[int] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a__ ( self ) -> Any:
_A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : Optional[Any] = self.get_dummy_components()
_A : List[str] = AudioLDMPipeline(**_a )
_A : Optional[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
_A : Optional[Any] = self.get_dummy_inputs(_a )
_A : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **_a )
_A : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.016
_A : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 , **_a )
_A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_a ) / vocoder_sampling_rate == 0.032
def a__ ( self ) -> str:
_A : List[Any] = self.get_dummy_components()
_A : Optional[Any] = AudioLDMPipeline(**_a )
_A : Union[str, Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : str = ["""hey"""]
_A : Union[str, Any] = audioldm_pipe(_a , num_inference_steps=1 )
_A : Optional[int] = output.audios.shape
assert audio_shape == (1, 256)
_A : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_A : Dict = SpeechTaHifiGan(_a ).to(_a )
_A : Tuple = audioldm_pipe(_a , num_inference_steps=1 )
_A : Tuple = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a__ ( self ) -> Any:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a )
def a__ ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=_a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a )
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> int:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Any = np.random.RandomState(_a ).standard_normal((1, 8, 128, 16) )
_A : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_A : List[str] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a__ ( self ) -> Any:
_A : List[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Tuple = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Union[str, Any] = self.get_inputs(_a )
_A : Dict = 25
_A : Union[str, Any] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : int = audio[7_7230:7_7240]
_A : Dict = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_A : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a__ ( self ) -> Any:
_A : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_A : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_A : List[Any] = audioldm_pipe.to(_a )
audioldm_pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = self.get_inputs(_a )
_A : Optional[int] = audioldm_pipe(**_a ).audios[0]
assert audio.ndim == 1
assert len(_a ) == 8_1920
_A : Any = audio[2_7780:2_7790]
_A : int = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_A : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
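# Added illustration (not from the original tests): the `guidance_scale` used above
# steers generation via classifier-free guidance, mixing the conditional and
# unconditional noise predictions. A hypothetical numpy sketch of that formula
# with made-up stand-in tensors:
def classifier_free_guidance(noise_uncond, noise_cond, guidance_scale):
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)


_uncond = np.zeros(4)
_cond = np.ones(4)
assert classifier_free_guidance(_uncond, _cond, 2.5).tolist() == [2.5, 2.5, 2.5, 2.5]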
| 343
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
def __call__( self , _a , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , **_a , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
_A : Optional[Any] = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
_A : Dict = titles if not isinstance(_a , _a ) else [titles]
_A : Tuple = texts if not isinstance(_a , _a ) else [texts]
_A : Any = len(_a )
_A : Optional[Any] = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' )
_A : str = super().__call__(_a , _a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )["""input_ids"""]
_A : Optional[int] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
_A : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A : str = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def a__ ( self , _a , _a , _a = 16 , _a = 64 , _a = 4 , ) -> List[DPRSpanPrediction]:
_A : Dict = reader_input["""input_ids"""]
_A , _A , _A : Tuple = reader_output[:3]
_A : List[str] = len(_a )
_A : Tuple = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
_A : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_A : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A : Tuple = sequence_ids.index(self.pad_token_id )
else:
_A : Tuple = len(_a )
_A : Union[str, Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self , _a , _a , _a , _a , ) -> List[DPRSpanPrediction]:
_A : Tuple = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A : Tuple = sorted(_a , key=lambda _a : x[1] , reverse=_a )
_A : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
_A : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 343
| 1
|
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Sort a list in place by repeatedly selecting the smallest remaining element."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 181
|
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 181
| 1
|
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
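# Added usage example (not in the original file): the same secant iteration applied
# to g(x) = x**2 - 2, whose positive root is sqrt(2).
def g(x: float) -> float:
    return x * x - 2


_root = intersection(g, 1.0, 2.0)
assert abs(_root - 2**0.5) < 1e-4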
| 369
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class CTRLConfig(PretrainedConfig):
    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(self, vocab_size=24_6534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
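# Added illustration (not part of the config class): the `attribute_map` above lets
# generic code ask for `hidden_size` while the stored attribute stays `n_embd`.
# A hypothetical standalone sketch of that indirection:
class _AttributeMapSketch:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd: int = 1280) -> None:
        self.n_embd = n_embd

    def __getattr__(self, name: str):
        # only called when normal lookup fails, e.g. for `hidden_size`
        mapping = type(self).attribute_map
        if name in mapping:
            return getattr(self, mapping[name])
        raise AttributeError(name)


assert _AttributeMapSketch().hidden_size == 1280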
| 223
| 0
|
def count_inversions_bf(arr) -> int:
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists and count the inversions that cross the split point."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p)
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
if __name__ == "__main__":
main()
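# Added sanity check (not in the original file): a strictly decreasing sequence of
# length n has every pair inverted, i.e. n * (n - 1) / 2 inversions.
_n = 6
_reversed_arr = list(range(_n))[::-1]
assert count_inversions_bf(_reversed_arr) == _n * (_n - 1) // 2 == 15
_, _num_recursive = count_inversions_recursive(_reversed_arr)
assert _num_recursive == 15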
| 230
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect rows partition by partition, in the given order, as (row_id, row_dict) pairs."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(100 ).repartition(1 )
snake_case__ : Optional[int] = Spark(__lowerCAmelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case__ : Dict = spark.range(10 ).repartition(2 )
snake_case__ : Any = [1, 0]
snake_case__ : Tuple = _generate_iterable_examples(__lowerCAmelCase , __lowerCAmelCase ) # Reverse the partitions.
snake_case__ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__lowerCAmelCase , __lowerCAmelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__ , snake_case__ : Union[str, Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
snake_case__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
snake_case__ : List[Any] = spark.range(10 ).repartition(1 )
snake_case__ : int = SparkExamplesIterable(__lowerCAmelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__lowerCAmelCase ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    """simple docstring"""
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
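    # With max_shard_size=1, every 8-byte row overflows a shard on its own, so the
    # partition count is capped at the number of rows rather than growing further.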
| 230
| 1
|
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"""+""", """-""", """*""", """/"""}
    stack : list[Any] = []
    for token in postfix_notation:
        if token in operations:
            # The top of the stack is the right-hand operand.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated toward zero (Python's // floors).
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
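    # Illustrative sanity checks (not part of the original module): (2 + 1) * 3
    # in postfix is 9, and mixed-sign division truncates toward zero, so
    # "-7 2 /" yields -3 rather than Python's floored -4.
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix(["-7", "2", "/"]) == -3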
| 358
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
"""simple docstring"""
a__ : Any = """UNwant\u00E9d,running"""
a__ : Dict = """unwanted, running"""
return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    def test_chinese( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def test_basic_tokenizer_lower( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_no_lower( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def test_wordpiece_tokenizer( self ):
        """simple docstring"""
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
    def test_prophetnet_tokenizer_batch_encoding( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_ids = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
        batch = tokenizer(src_text , padding=True , return_tensors="""pt""" )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_ids , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control( self ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 266
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
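# Note: _LazyModule defers the torch/TF submodule imports registered above until
# the corresponding attribute is first accessed, keeping plain config imports cheap.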
| 292
|
def mean_absolute_deviation( nums ) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
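    # Worked example (illustrative): for [1, 2, 3] the mean is 2 and the absolute
    # deviations are 1, 0, 1, so the result is 2/3.
    print(mean_absolute_deviation([1, 2, 3]))  # 0.666...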
| 124
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1_0_2_4 ):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(sequence ):
        return tok(sequence , return_tensors='pt' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F'packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.' )
        Path(save_path / F'{split}.source' ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / F'{split}.target' ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        shutil.copyfile(src_path , save_path / F'{split}.source' )
        shutil.copyfile(tgt_path , save_path / F'{split}.target' )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=1_2_8 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
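    # Illustrative invocation (script name and paths are placeholders):
    #   python pack_dataset.py --tok_name facebook/bart-large-cnn \
    #       --max_seq_len 128 --data_dir ./cnn_dm --save_path ./cnn_dm_packed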
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained(self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt='first prompt' , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        image = pipe.image_variation(init_image , generator=generator , output_type='numpy' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
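        # The slice assertions compare a 3x3 corner of each image against reference
        # values with a loose 1e-1 tolerance, since fp16 GPU kernels are not
        # bit-reproducible across hardware.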
| 304
| 0
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet( hor ):
    if hor == 1_28:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_55_36,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
    with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f:
        json.dump(config , f )
def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 1_28, 2_56),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_55_36,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(F"""length of state dict: {len(state_dict.keys() )}""" )
    print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
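    # Note: the zip-based remapping above assumes both state dicts enumerate their
    # parameters in the same order, which holds because the diffusers blocks mirror
    # the original network's module layout one-to-one.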
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 92
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 92
| 1
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs( input_types ):
    '''simple docstring'''
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def output_types( outputs ):
    '''simple docstring'''
    output_types = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append('text' )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
@is_tool_test
class ToolTesterMixin :
    def test_inputs_outputs( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call( self):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs) , self.tool.outputs)
    def test_common_attributes( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
    def test_agent_types_outputs( self):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs , list):
            outputs = [outputs]
        self.assertEqual(len(outputs) , len(self.tool.outputs))
        for output, output_type in zip(outputs , self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type))
    def test_agent_type_inputs( self):
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs):
            if isinstance(input_type , list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs , list):
            outputs = [outputs]
        self.assertEqual(len(outputs) , len(self.tool.outputs))
| 371
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest( unittest.TestCase ):
    def setUp( self):
'''simple docstring'''
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs)
    def get_feature_extractor( self , **kwargs):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs)
    def tearDown( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default( self):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor , TvltImageProcessor)
    def test_feature_extractor( self):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor)
        audio = np.ones([1_2_0_0_0])
        audio_dict = feature_extractor(audio , return_tensors='np')
        input_processor = processor(audio=audio , return_tensors='np')
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_image_processor( self):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor)
        image = np.ones([3, 2_2_4, 2_2_4])
        image_dict = image_processor(image , return_tensors='np')
        input_processor = processor(images=image , return_tensors='np')
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_processor( self):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor)
        audio = np.ones([1_2_0_0_0])
        image = np.ones([3, 2_2_4, 2_2_4])
        inputs = processor(audio=audio , images=image)
self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_model_input_names( self):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor)
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
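        # TvltProcessor simply exposes the concatenation of the two sub-components'
        # input names, so a mismatch here would signal a drifted save/load contract.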
| 242
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV, aliased so the existing cva.* calls below resolve
import numpy as np
# Parameters
OUTPUT_SIZE = (7_20, 12_80)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_00
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 2_50
def main() -> None:
    '''simple docstring'''
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(3_2 )
        file_name = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , new_image , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj )
        with open(f"""{file_root}.txt""" , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
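    # The label lines written above follow the YOLO convention of
    # "class x_center y_center width height", with coordinates normalised to [0, 1].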
def get_dataset( label_dir : str , img_dir : str ) -> tuple[list, list]:
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"""{label_name}.jpg""" )
        boxes = []
for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
        img_paths.append(img_path )
        labels.append(boxes )
return img_paths, labels
def update_image_and_anno( all_img_list : list , all_annos : list , idxs : list[int] , output_size : tuple[int, int] , scale_range : tuple[float, float] , filter_scale : float = 0.0 , ) -> tuple[list, list, str]:
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
return output_img, new_anno, path_list[0]
def random_chars( number_char : int ) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 84
|
def is_isogram( string ):
    if not all(x.isalpha() for x in string ):
        raise ValueError('''String must only contain alphabetic characters.''' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 275
| 0
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="max_length" , return_tensors="np" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ):
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
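            # Both backends downcast to float32: log-mel features gain nothing from
            # float64 precision, and halving the memory keeps padded batches small.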
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
        self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
| 363
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig( PretrainedConfig):
    model_type = """instructblip_vision_model"""
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCAmelCase__ )
a__ , a__ : Optional[Any] =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
a__ : Any =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig):
    model_type = """instructblip_qformer"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCAmelCase__ )
a__ , a__ : str =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
a__ : Optional[int] =config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig):
    model_type = """instructblip"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
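# Minimal usage sketch (illustrative): a default config can be built and
# round-tripped without any pretrained weights.
#   config = InstructBlipConfig()
#   config_dict = config.to_dict()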
| 148
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 133
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
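# Minimal usage sketch (illustrative; the checkpoint name is just an example):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("path/to/image.jpg")  # {"predicted_depth": ..., "depth": <PIL image>}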
| 133
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
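# Example (illustrative): "tiny" selects the Swin-T backbone defaults above.
#   config = get_upernet_config("upernet-swin-tiny")
#   assert config.backbone_config.embed_dim == 96 and config.num_labels == 150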
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
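def _sanity_check_unfold_round_trip():
    # Illustrative self-check (not part of the original script, call manually if
    # desired): the [0, 2, 1, 3] permutation is its own inverse, so applying a
    # correct_* function followed by its reverse_* counterpart is a no-op.
    weight = torch.arange(16.0).reshape(2, 8)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(weight)), weight)
    norm = torch.arange(8.0)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(norm)), norm)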
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
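# Example invocation (the script filename and paths are illustrative):
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny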
| 135
|
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
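# Round-trip example (illustrative):
#   >>> encrypt("Sos!")
#   '... --- ... -.-.--'
#   >>> decrypt("... --- ... -.-.--")
#   'SOS!'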
| 135
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
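# Sketch of the denoising loop these tests exercise (mirrors full_loop above;
# `model` stands in for any noise-predicting network):
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample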
| 106
|
"""simple docstring"""
# Hand-picked denoising schedules (27/40/50/100/185 steps, each running from
# t=999 down to t=0); distinct names keep later tables from overwriting
# earlier ones.
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
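# Consuming one of these tables is a one-liner, assuming a diffusion pipeline
# that accepts an explicit timestep list (illustrative):
#   images = pipe(prompt, timesteps=fast27_timesteps).images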
| 106
| 1
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise
    # download and run a full comet checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raise_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
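# Registering a patcher for a new heavyweight metric follows the same pattern
# (all names below are hypothetical):
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric_backend.heavy_forward") as mock_forward:
#         mock_forward.return_value = 0.5
#         yield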
| 27
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
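# Illustrative round trip (the checkpoint name and input arrays are
# assumptions):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop"], audio=waveform, sampling_rate=32000)
#   decoded = processor.batch_decode(generated_audio, padding_mask=inputs["padding_mask"])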
| 27
| 1
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 40
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
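# Example invocation (the script filename and checkpoint path are illustrative):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12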
| 248
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
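# Minimal usage sketch (the image source is illustrative):
#   processor = DeiTImageProcessor()
#   batch = processor(images=PIL.Image.open("cat.jpg"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop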
| 203
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 203
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the scikit-learn Bunch into its feature and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
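# Illustrative shapes: Iris has 150 samples with 4 features, so
#   features, targets = data_handling(load_iris())
# yields features.shape == (150, 4) and targets.shape == (150,).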
| 235
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
UpperCAmelCase__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase__ : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=__lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = self.task_name.lower()
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = "train"
UpperCAmelCase__ : List[str] = "dev"
UpperCAmelCase__ : Union[str, Any] = "test"
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : GlueDataTrainingArguments
UpperCAmelCase__ : str
UpperCAmelCase__ : List[InputFeatures]
def __init__( self , _a , _a , _a = None , _a = Split.train , _a = None , ) -> str:
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , _a , )
_a : List[str] = args
_a : List[str] = glue_processors[args.task_name]()
_a : List[Any] = glue_output_modes[args.task_name]
if isinstance(_a , _a ):
try:
_a : Any = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
_a : Union[str, Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_a : str = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_a , _a : Optional[int] = label_list[2], label_list[1]
_a : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_a ):
if os.path.exists(_a ) and not args.overwrite_cache:
_a : List[Any] = time.time()
_a : str = torch.load(_a )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_a : str = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_a : Any = self.processor.get_test_examples(args.data_dir )
else:
_a : List[str] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_a : Dict = examples[:limit_length]
_a : Union[str, Any] = glue_convert_examples_to_features(
_a , _a , max_length=args.max_seq_length , label_list=_a , output_mode=self.output_mode , )
_a : Optional[int] = time.time()
torch.save(self.features , _a )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> Optional[int]:
return len(self.features )
def __getitem__( self , _a ) -> InputFeatures:
return self.features[i]
def __lowercase ( self ) -> Tuple:
return self.label_list
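
# --- Usage sketch (added; illustrative only). Builds a dev-split dataset for MRPC.
# The data_dir and tokenizer checkpoint are placeholder values, not from the
# original file.
def _glue_dataset_demo():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
    dev_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
    print(len(dev_dataset), dev_dataset.get_labels())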
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
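
# --- Added note: merge sort halves the input at each level and merges in O(n),
# giving O(n log n) time and O(n) extra space; the generator-based _merge keeps
# the merge itself single-pass. A quick self-check against the built-in sort:
def _merge_sort_self_check() -> None:
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert merge_sort(data.copy()) == sorted(data)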
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a picklable one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Reset to a picklable pre-tokenizer before serializing to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
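
# --- Usage sketch (added; illustrative only). Round-trips a sentence through the
# fast tokenizer. The checkpoint name is taken from the tables above; network
# access is assumed on first use, and the Jieba pre-tokenizer needs the rjieba
# package installed.
def _roformer_fast_demo() -> None:
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    encoded = tokenizer("今天天气非常好。")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))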
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any model that can classify decoded video frames."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
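
# --- Usage sketch (added; illustrative only). The pipeline is normally built via
# transformers.pipeline(); the checkpoint name and video URL below are placeholders.
def _video_classification_demo() -> None:
    from transformers import pipeline

    classifier = pipeline(task="video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    predictions = classifier("https://example.com/some_video.mp4", top_k=3, num_frames=16)
    print(predictions)  # [{"score": ..., "label": ...}, ...]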
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
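
# --- Added note: importing this package only builds _import_structure; the
# sys.modules replacement above swaps in a _LazyModule, so the heavy torch/tf/flax
# submodules are imported lazily, on first attribute access (e.g. the first time
# BlenderbotModel is looked up).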
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowercase_ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowercase_ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowercase_ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowercase_ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowercase_ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowercase_ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowercase_ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowercase_ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowercase_ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowercase_ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowercase_ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowercase_ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowercase_ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowercase_ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowercase_ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Run generation on the examples in input_path and optionally score the outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
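
# --- Example invocation (added; paths and model name are placeholders; extra
# --key=value args are forwarded to model.generate):
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source gens/test_gens.txt \
#     --reference_path cnn_dm/test.target --score_path gens/rouge.json \
#     --task summarization --bs 16 --fp16 --num_beams=4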
def solution(n: int = 1000) -> int:
    """Project Euler 57: count how many of the first ``n`` expansions of the
    continued fraction for sqrt(2) have a numerator with more digits than the
    denominator.

    >>> solution(14)
    2
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
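
# --- Added note: the expansions of sqrt(2) obey x_{k+1} = 1 + 1/(1 + x_k); with
# x_k = n/d the next fraction is (n + 2d)/(n + d), which is exactly the update in
# solution(). The sequence runs 3/2, 7/5, 17/12, 41/29, ... and 1393/985 (the 8th
# expansion) is the first whose numerator has more digits than its denominator.
def _first_hit_check() -> None:
    assert solution(8) == 1  # only 1393/985 qualifies among the first 8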
if __name__ == "__main__":
print(F"""{solution() = }""")
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
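
# --- Usage sketch (added; illustrative only). The output_dir, metric and patience
# values are placeholders.
#
# checkpoint_cb = get_checkpoint_callback("outputs", "rouge2")
# early_stop_cb = get_early_stopping_callback("rouge2", 3)
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])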
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
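
# --- Example invocation (added; the CSV paths are placeholders; each file needs a
# header row, with the label column selected by --label_column_id):
# python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --output_dir ./model_out --do_train --do_eval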
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether ``cp`` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
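
# --- Added note: each line of --save_path holds a JSON list of token positions
# whose "##"-prefixed piece is a single Chinese character, e.g. [3, 4, 7];
# whole-word-masking training scripts read these lists to mask whole Chinese
# words instead of isolated subword pieces.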
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True when at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
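
# --- Worked example (added). Graham's law gives rate_1 / rate_2 = sqrt(M_2 / M_1),
# so hydrogen (M ~= 2.016 g/mol) should effuse about 3.98x faster than oxygen
# (M ~= 31.998 g/mol):
def _grahams_law_demo() -> None:
    ratio = effusion_ratio(2.016, 31.998)
    assert isinstance(ratio, float)
    assert round(ratio, 2) == 3.98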
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None,
        tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : str ) -> str:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
A__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , snake_case_ : int ) -> Union[str, Any]:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self : Optional[int] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __magic_name__ ( self : Tuple , snake_case_ : str ) -> None:
'''simple docstring'''
A__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self : int , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
A__ = [1] * len(self.prefix_tokens )
A__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __magic_name__ ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : Tuple , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self : List[str] , snake_case_ : Dict , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : Tuple ) -> int:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A__ = src_lang
A__ = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
A__ = self.convert_tokens_to_ids(snake_case_ )
A__ = tgt_lang_id
return inputs
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self : Union[str, Any] , snake_case_ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : int ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(snake_case_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self : List[Any] , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # SPIECE_UNDERLINE ("▁") is the module-level SentencePiece word-boundary marker
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
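# Usage sketch (assuming this is the MBart-style slow tokenizer its methods suggest;
# the checkpoint name is illustrative):
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#   ids = tok("Hello world").input_ids  # ends with [eos_token_id, en_XX lang code id]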
| 247
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
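    # On an ideal, noiseless simulator both qubits are flipped to |1>, so the
    # result is deterministic: Total count for various states are: {'11': 1000}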
| 247
| 1
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 124
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
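    # Minimal usage sketch (assumes a DDPM checkpoint such as "google/ddpm-cifar10-32";
    # the scheduler is converted to DDIM in __init__):
    #   pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    #   image = pipeline(num_inference_steps=50, eta=0.0).images[0]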
| 124
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 27
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 27
| 1
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 354
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
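    # Expected output: "aab matches the given pattern c*a*b"
    # ("c*" consumes zero 'c's, "a*" consumes "aa", and "b" matches "b").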
| 189
| 0
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 171
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
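# Usage sketch (checkpoint name illustrative):
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values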
| 171
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 351
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
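    # Resulting layout (RoBERTa/CamemBERT convention):
    #   single sequence:   <s> X </s>
    #   pair of sequences: <s> A </s></s> B </s>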
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 334
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 104
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
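# Example: get_distance(0, 0, 50) == 1.0, since the origin never diverges and the
# loop runs to completion (step ends at 49, so 49 / 49 == 1.0); points that blow
# up immediately return values close to 0.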
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104
| 1
|
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 370
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True, normalize_vars: Optional[bool] = True, padding_value: float = 0.0) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
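    # utterance_cmvn example: for a (num_frames, 80) fbank matrix it subtracts the
    # per-bin mean (and divides by the per-bin std) computed over the first
    # `input_length` frames, then overwrites any padded frames with padding_value.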
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
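# Usage sketch (hypothetical 16 kHz mono waveform as a 1-D numpy array):
#   features = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   features.input_features  # padded, CMVN-normalized fbank features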
| 334
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
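# Each appended entry pairs a HuggingFace parameter name with the matching key
# in the original CvT checkpoint, e.g. for idx=0, cnt=0 the projection-query
# weight maps as (illustrative):
#   ("cvt.encoder.stages.0.layers.0.attention.attention.projection_query.weight",
#    "stage0.blocks.0.attn.proj_q.weight")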
def cls_token(idx):
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    # Build the config with ImageNet-1k label mappings.
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
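# Example invocation (hypothetical script name and local paths):
#   python convert_cvt.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384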
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim empty lines at both ends of the captured block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
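# For reference, each task guide contains a block like the following, which this
# script keeps in sync (model list illustrative):
#
#   <!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->
#   [BERT](../model_doc/bert), [DistilBERT](../model_doc/distilbert), ...
#   <!--End of the generated tip-->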
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` digits, computed with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
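# For reference, the series implemented above is Chudnovsky's:
#   426880 * sqrt(10005) / pi = sum_{k>=0} (6k)! * (13591409 + 545140134*k)
#                               / ((3k)! * (k!)^3 * (-262537412640768000)^k)
# Each term contributes roughly 14 extra digits, hence the ceil(precision / 14)
# iteration count.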
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
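# Minimal usage sketch of the lazy-import structure above (assumes torch is
# installed, so the model symbols resolve on first access):
#
#   from transformers import FNetConfig, FNetModel
#   model = FNetModel(FNetConfig())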
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page-replacement cache that discards the least recently used key first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to `x`, evicting the LRU key if the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
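# Why the assert holds: the refer order is A, 2, 3, A, 4, 5. When 5 arrives the
# cache already holds [4, 'A', 3, 2] (most- to least-recent), so the least
# recently used key, 2, is evicted, leaving [5, 4, 'A', 3].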
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__magic_name__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ : List[Any] = layer_type
UpperCAmelCase_ : int = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Optional[int] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" )
# forward pass
UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ):
"""simple docstring"""
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class ResNetEmbeddings(nn.Module):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class ResNetShortCut(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ):
"""simple docstring"""
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ):
"""simple docstring"""
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
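# Reference form of the residual update implemented by the layer classes above
# and below: hidden_state = activation(layer(x) + shortcut(x)), where the
# shortcut is a strided 1x1 conv + BatchNorm only when the shape changes, and
# the identity otherwise.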
class ResNetBottleNeckLayer(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ):
"""simple docstring"""
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class ResNetStage(nn.Module):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ):
"""simple docstring"""
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class ResNetEncoder(nn.Module):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ):
"""simple docstring"""
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class ResNetPreTrainedModel(PreTrainedModel):
__lowercase = ResNetConfig
__lowercase = """resnet"""
__lowercase = """pixel_values"""
__lowercase = True
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
lowercase : Tuple = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase : str = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , _lowerCamelCase , )
class ResNetModel(ResNetPreTrainedModel):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _lowerCamelCase , )
class ResNetForImageClassification(ResNetPreTrainedModel):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
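# The problem-type dispatch above follows the standard transformers convention:
#   regression                  -> MSELoss (logits squeezed when num_labels == 1)
#   single_label_classification -> CrossEntropyLoss over flattened logits
#   multi_label_classification  -> BCEWithLogitsLoss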
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , _lowerCamelCase , )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
def __init__( self , lowerCAmelCase_ ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
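# Illustrative usage of the Conversation container defined above:
#
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation.add_user_input("Is it an action movie?")  # warns: input already pending
#   conversation.mark_processed()
#   conversation.append_response("The Big Lebowski")
#   print(conversation)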
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
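#
# A minimal illustration of that expansion (hypothetical dimension values,
# mirroring the example above):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(v) for v in itertools.product(*dims)]
#   # -> 6 combinations, exactly the list above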
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the block below to debug this function without running the actual benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
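
# Example invocation, following the argparse definitions above (the script
# name, training command, and model are placeholders, not taken from this
# file):
#
#   python trainer-benchmark.py \
#       --base-cmd "python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small" \
#       --variations "|--fp16|--bf16" "|--tf32" \
#       --target-metric-key train_samples_per_second \
#       --report-metric-keys train_loss \
#       --repeat-times 2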
| 68
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
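
# A minimal sketch of the rule these tests pin down (not the actual diffusers
# implementation, which lives in diffusers.pipelines.pipeline_utils): every
# torch ".bin" weight file must have a ".safetensors" counterpart in the same
# subfolder, where a transformers-style "pytorch_model" stem maps to "model"
# and an optional variant such as "fp16" appears as an infix before the suffix.
def _sketch_is_safetensors_compatible(filenames, variant=None):
    infix = f".{variant}" if variant is not None else ""
    available = set(filenames)
    for f in filenames:
        if not f.endswith(f"{infix}.bin"):
            continue  # non-variant files are ignored when a variant is requested
        folder, name = f.rsplit("/", 1)
        stem = name[: -len(f"{infix}.bin")]
        if stem == "pytorch_model":  # transformers convention: pytorch_model.bin -> model.safetensors
            stem = "model"
        if f"{folder}/{stem}{infix}.safetensors" not in available:
            return False
    return True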
| 245
| 0
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def __A ( self: str ) -> Tuple:
_A = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
_A = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''},
] , )
_A = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-05,
'''token''': 3_80_15,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-05,
'''token''': 2_55_06,
'''token_str''': ''' accuser''',
},
] , )
_A = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def __A ( self: List[Any] ) -> Tuple:
_A = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
_A = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
_A = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
_A = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 29_41, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
] , )
_A = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
[
{
'''score''': 2.2e-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
def __A ( self: List[Any] ) -> Tuple:
_A = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(__A )
@slow
@require_tf
def __A ( self: Tuple ) -> Union[str, Any]:
_A = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(__A )
def __A ( self: int , __A: str ) -> Dict:
_A = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__A ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_10, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 15_73, '''token_str''': ''' Chris'''},
] , )
_A = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__A ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 22_01,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 1_27_90,
'''token_str''': ''' Lyon''',
},
] , )
_A = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__A ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def __A ( self: List[str] ) -> Tuple:
_A = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
_A = None
_A = None
self.run_pipeline_test(__A , [] )
@require_tf
def __A ( self: List[str] ) -> List[str]:
_A = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
_A = None
_A = None
self.run_pipeline_test(__A , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
def __A ( self: Optional[Any] , __A: Any , __A: Any ) -> List[Any]:
_A = fill_masker.tokenizer
_A = fill_masker.model
_A = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
_A = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
_A = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__A , [
[
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
],
[
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
],
] , )
with self.assertRaises(__A ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__A ):
fill_masker('''This is''' )
self.run_test_top_k(__A , __A )
self.run_test_targets(__A , __A )
self.run_test_top_k_targets(__A , __A )
self.fill_mask_with_duplicate_targets_and_top_k(__A , __A )
self.fill_mask_with_multiple_masks(__A , __A )
def __A ( self: List[str] , __A: Any , __A: List[str] ) -> List[str]:
_A = tokenizer.get_vocab()
_A = sorted(vocab.keys() )[:2]
# Pipeline argument
_A = FillMaskPipeline(model=__A , tokenizer=__A , targets=__A )
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
_A = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __A )
_A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__A ) )
# Call argument
_A = FillMaskPipeline(model=__A , tokenizer=__A )
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__A )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
_A = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __A )
_A = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__A ) )
# Score equivalence
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__A )
_A = [top_mask['''token_str'''] for top_mask in outputs]
_A = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__A ) == set(__A ):
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__A )
_A = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) )
# Raises with invalid
with self.assertRaises(__A ):
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__A ):
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''''''] )
with self.assertRaises(__A ):
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='''''' )
def __A ( self: str , __A: str , __A: int ) -> List[Any]:
_A = FillMaskPipeline(model=__A , tokenizer=__A , top_k=2 )
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
_A = FillMaskPipeline(model=__A , tokenizer=__A )
_A = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__A , [
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
] , )
self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) )
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
def __A ( self: Tuple , __A: List[Any] , __A: List[str] ) -> Optional[Any]:
_A = FillMaskPipeline(model=__A , tokenizer=__A )
_A = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__A , [
[
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
],
[
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
],
[
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
{'''sequence''': ANY(__A ), '''score''': ANY(__A ), '''token''': ANY(__A ), '''token_str''': ANY(__A )},
],
] , )
| 368
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
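
# Note: the hard-coded expected_slice values above are only stable because
# enable_full_determinism() runs at import time and each sampling call reseeds
# with torch.manual_seed(0); without both, the 1e-2 tolerance would be much
# harder to guarantee across devices.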
| 75
| 0
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 38
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
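
# Migration sketch (the checkpoint name is an example, not taken from this
# file): new code should construct the replacement class directly, which skips
# the deprecation warning entirely:
#   from transformers import FlavaImageProcessor
#   image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")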
| 155
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : int = {'vocab_file': 'sentencepiece.bpe.model'}
A_ : List[Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
A_ : Dict = {
'camembert-base': 512,
}
A_ : Optional[Any] = '▁'
class _a (__lowercase ):
'''simple docstring'''
UpperCAmelCase__: str = VOCAB_FILES_NAMES
UpperCAmelCase__: Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: str = ["input_ids", "attention_mask"]
def __init__( self , A__ , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=["<s>NOTUSED", "</s>NOTUSED"] , A__ = None , **A__ , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
A__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
A__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
A__ : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
A__ : Tuple = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
A__ : Union[str, Any] = len(self.fairseq_tokens_to_ids )
A__ : str = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
A__ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __A ( self , A__ , A__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
A__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __A ( self , A__ , A__ = None ):
A__ : Any = [self.sep_token_id]
A__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __A ( self ):
A__ : List[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , A__ ):
return self.sp_model.encode(_a , out_type=_a )
def __A ( self , A__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_a ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_a )
def __A ( self , A__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self , A__ ):
A__ : Dict = []
A__ : int = ''''''
A__ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A__ : Optional[int] = True
A__ : Any = []
else:
current_sub_tokens.append(_a )
A__ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self ):
A__ : Optional[Any] = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self , A__ ):
A__ : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : List[str] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , A__ , A__ = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ : int = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
A__ : Tuple = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 365
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 141
| 0
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
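
# Spot checks (values rounded): gaussian(0) == 1 / sqrt(2 * pi) ≈ 0.3989, and
# shifting x and mu together leaves the density unchanged, so gaussian(2, 2)
# gives the same ≈ 0.3989.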
| 209
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_UpperCAmelCase = Vector()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case__ ) , "(0,0,0,0,0,1)" )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case__ ) , 4 )
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2] )
_UpperCAmelCase = Vector([1, 2, 3, 4, 5] )
_UpperCAmelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_UpperCAmelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([2, -1, 4] ) # for test of dot product
_UpperCAmelCase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 2, 3] )
_UpperCAmelCase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case__ , snake_case__ ) ) , "(3,4,7)" )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0, 0, 0, 0] )
_UpperCAmelCase = x.copy()
self.assertEqual(str(snake_case__ ) , str(snake_case__ ) )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case__ ) , "(0,1,0)" )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case__ ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case__ , snake_case__ ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_UpperCAmelCase = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case__ ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 133
| 0
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : str , snake_case_ : List[str] , snake_case_ : List[str]=13 , snake_case_ : Optional[int]=7 , snake_case_ : Optional[int]=True , snake_case_ : Any=True , snake_case_ : Tuple=True , snake_case_ : Any=True , snake_case_ : int=99 , snake_case_ : int=32 , snake_case_ : Tuple=5 , snake_case_ : Union[str, Any]=4 , snake_case_ : Dict=37 , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=0.1 , snake_case_ : str=0.1 , snake_case_ : int=512 , snake_case_ : Dict=16 , snake_case_ : Optional[Any]=2 , snake_case_ : int=0.02 , snake_case_ : List[Any]=False , snake_case_ : str=True , snake_case_ : Any="None" , snake_case_ : int=3 , snake_case_ : Tuple=4 , snake_case_ : List[Any]=None , ):
UpperCamelCase_: Union[str, Any] = parent
UpperCamelCase_: Optional[int] = batch_size
UpperCamelCase_: List[str] = seq_length
UpperCamelCase_: int = is_training
UpperCamelCase_: Optional[Any] = use_input_mask
UpperCamelCase_: str = use_token_type_ids
UpperCamelCase_: str = use_labels
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: Tuple = hidden_size
UpperCamelCase_: Any = num_hidden_layers
UpperCamelCase_: Union[str, Any] = num_attention_heads
UpperCamelCase_: List[Any] = intermediate_size
UpperCamelCase_: List[Any] = hidden_act
UpperCamelCase_: Optional[Any] = hidden_dropout_prob
UpperCamelCase_: Any = attention_probs_dropout_prob
UpperCamelCase_: Optional[Any] = max_position_embeddings
UpperCamelCase_: Tuple = type_vocab_size
UpperCamelCase_: Any = type_sequence_label_size
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: Union[str, Any] = num_labels
UpperCamelCase_: Tuple = num_choices
UpperCamelCase_: List[str] = relative_attention
UpperCamelCase_: List[str] = position_biased_input
UpperCamelCase_: str = pos_att_type
UpperCamelCase_: Any = scope
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: Optional[int] = None
if self.use_input_mask:
UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase_: List[str] = None
if self.use_token_type_ids:
UpperCamelCase_: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_: Any = None
UpperCamelCase_: List[str] = None
UpperCamelCase_: Tuple = None
if self.use_labels:
UpperCamelCase_: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_: Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self : Union[str, Any] ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: List[str] = self.get_config()
UpperCamelCase_: Union[str, Any] = 300
return config
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : str ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Optional[int] ):
UpperCamelCase_: Any = DebertaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: List[Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase_: Tuple = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase_: Optional[int] = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCAmelCase__ ( self : int , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : str ):
UpperCamelCase_: List[Any] = DebertaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: Any = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
UpperCamelCase_: Tuple = self.num_labels
UpperCamelCase_: Optional[int] = DebertaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: Dict = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[str] ):
UpperCamelCase_: int = self.num_labels
UpperCamelCase_: List[str] = DebertaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: List[str] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : int , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
UpperCamelCase_: Dict = DebertaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: Optional[Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Optional[int] = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Dict = False
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Union[str, Any] = DebertaModelTester(self )
UpperCamelCase_: Tuple = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self : Any ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: Optional[Any] = DebertaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 223
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
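# The tiny vocab above exercises CTRL's BPE merges: "react" only matches the
# "r e" rule, so it decomposes into "re@@ a@@ c@@ t", while "adapt" follows the
# "a d" and "ap t</w>" merges up to "ad apt</w>" and survives as a single token.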
| 223
| 1
|
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers
    with the lowest possible sum and return the sum along this path.
    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """
    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
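# Note: min_path_sum mutates the grid in place, so each cell ends up holding the
# cheapest cost of reaching it from the top-left corner; fill_row only ever needs
# the row directly above, giving O(1) extra space beyond the grid itself.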
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
|
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """
    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> bucket_sort([0, 1, -10, 15, 2, -2])
    [-10, -2, 0, 1, 2, 15]
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
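# With values spread over k buckets of roughly equal size, bucket sort averages
# O(n + k); the per-bucket sorted() call keeps the result correct even when many
# values collide in a single bucket.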
| 107
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
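# _LazyModule defers the heavy torch/tf/flax imports until an attribute is first
# accessed, keeping `import transformers` cheap for users who only need one backend.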
| 280
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
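# Minimal usage sketch (a sketch only, relying on the defaults defined above):
# config = MaskFormerConfig()  # falls back to Swin-B backbone + DETR decoder
# config = MaskFormerConfig.from_backbone_and_decoder_configs(
#     backbone_config=SwinConfig(), decoder_config=DetrConfig()
# )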
| 280
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
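# Note: TvltProcessor simply routes `audio` to the feature extractor and `images` to
# the image processor and merges the two output dicts, which is why the combined
# call in test_processor yields exactly the four audio_*/pixel_* keys.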
| 49
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5,
                num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        # NB: the True/False literals below are assumptions restored from the upstream
        # diffusers test; the obfuscated source lost them.
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
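    # NB: with pil_image=True the random tensor is rescaled to [0, 1] and converted to
    # a PIL image, so the fast tests also exercise the pipeline's image preprocessing.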
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        # the False literal is an assumption; the obfuscated source lost it
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 297
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # the attribute name is an assumption; the obfuscated source only kept the value 1024
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
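    # CANINE tokenizes at the Unicode code point level: 57344 (0xE000) and 57345
    # (0xE001) in the expected tokens above are the private-use code points the
    # tokenizer reserves for [CLS] and [SEP].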
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowercase = chr(0xe_0_0_7 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertIn(_lowerCamelCase ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase , __lowercase = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_5
__lowercase = chr(_lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
__lowercase = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,input_encoded + special_token_id )
__lowercase = tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = chr(0xe_0_0_5 )
__lowercase = chr(0xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(token_a[0] ,_lowerCamelCase )
self.assertEqual(token_a[0] ,_lowerCamelCase )
@require_tokenizers
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = [new_token_a]
__lowercase = [new_token_a]
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
__lowercase = 0xe_0_0_7
__lowercase = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = [AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )]
__lowercase = tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = '''hello world'''
if self.space_between_special_tokens:
__lowercase = '''[CLS] hello world [SEP]'''
else:
__lowercase = input
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.decode(_lowerCamelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase ,[output, output.lower()] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__lowercase = '''a'''
__lowercase = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[] )
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[additional_special_token_id] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
| 217
| 1
|
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # the attribute targets below are assumptions; the obfuscated source only kept the values
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
A : Union[str, Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
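# Example invocation (script name and paths are placeholders):
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base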
| 305
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
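# FlaxModelTesterMixin drives the shared battery of Flax checks (jit compilation,
# hidden-state shapes, etc.) through `self.model_tester`, so only the model-specific
# wiring above is needed in this file.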
| 305
| 1
|
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
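# Dataset.from_list infers the schema from the first record, which is why
# test_uneven_records expects col_2 to be dropped entirely and the missing col_1
# value of the second record to come back as None.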
| 134
|
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
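# Each DialoGPT checkpoint stores the LM head under "lm_head.decoder.weight";
# renaming that entry to "lm_head.weight" is the only change needed for the state
# dict to load into the transformers GPT-2 architecture.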
| 134
| 1
|
"""simple docstring"""
import math
def __A ( a_ :int) -> int:
if not isinstance(a_ , a_):
__a : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a_)
if number < 1:
__a : List[Any] = F"""Input value of [number={number}] must be > 0"""
raise ValueError(a_)
elif number == 1:
return 3
elif number == 2:
return 5
else:
__a : Optional[int] = int(math.log(number // 3 , 2)) + 2
__a : Any = [3, 5]
__a : Optional[Any] = 2
__a : Dict = 3
for block in range(1 , a_):
for _ in range(a_):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
proth_index += 1
increment *= 2
return proth_list[number - 1]
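# Proth numbers have the form k * 2^n + 1 with k odd and k < 2^n; the generator
# grows the list block by block, doubling `increment` because twice as many odd
# multipliers k fit below each successive power of two.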
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
A = 0
try:
A = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
| 160
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language-code token of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # The SentencePiece model is not picklable; it is reloaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
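
# Example usage (illustrative; the local file names are hypothetical):
#   tokenizer = Speech2TextTokenizer("vocab.json", "sentencepiece.bpe.model",
#                                    lang_codes="mustc", tgt_lang="de")
#   ids = tokenizer("hello world").input_ids  # prefixed with <lang:de>, suffixed with </s>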
| 351
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase_ = parser.parse_args()
lowercase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
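# Example invocation (script and paths illustrative):
#   python convert_mbart_checkpoint.py model.pt ./mbart-hf --hf_config facebook/mbart-large-cc25 --finetuned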
| 269
| 0
|
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default
    level. Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but skipped entirely when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS environment variable is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits each distinct message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for every attribute lookup."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
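
# Example usage (illustrative):
#   logger = get_logger(__name__)
#   set_verbosity_info()   # or set TRANSFORMERS_VERBOSITY=info in the environment
#   logger.info("now visible")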
| 141
|
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    """
    Returns the sum of all positive integers that cannot be written as the sum of
    two abundant numbers (Project Euler problem 23). An abundant number is one
    whose proper divisors sum to more than the number itself.
    """
    # sum_divs[n] will hold the sum of the proper divisors of n.
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
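
# Sanity check: for the default limit of 28123 (the bound from Project Euler 23),
# solution() should return 4179871.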
if __name__ == "__main__":
print(solution())
| 141
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 214
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
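
# The parametrize grids above cover the "default" setting (IN_MEMORY_MAX_SIZE == 0,
# i.e. never keep datasets in memory) as well as explicit byte limits that fall
# below and above the candidate dataset sizes.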
| 214
| 1
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
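# The distributed branches above only run under a multi-process launch, e.g.
# (command illustrative): accelerate launch --num_processes 2 this_script.py
# A plain single-process run only exercises the DistributedType.NO paths.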
| 155
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 155
| 1
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Return a new string built by alternating characters from the two inputs; once
    the shorter string is exhausted, the remainder of the longer one is appended.
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
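# The call above prints "AXBYZ": characters are interleaved until "AB" runs out,
# after which the remainder of "XYZ" is appended.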
| 362
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
| 159
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
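
# Example (illustrative): the defaults above mirror the poolformer_s12 variant, so
#   config = PoolFormerConfig()
#   config.hidden_sizes   # -> [64, 128, 320, 512]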
| 84
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
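# Hedged, self-contained sketch of the lazy-import pattern used above, without
# the transformers _LazyModule helper: a module-level __getattr__ (PEP 562)
# defers the real import until an attribute is first accessed. The module and
# symbol names in this example are illustrative.
import importlib

_lazy_imports = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _lazy_imports.items():
        if name in symbols:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")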
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowercase__ : Dict = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowercase__ : Any = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def _lowerCAmelCase ( images ):
    # Torch tensor in [-1, 1], NCHW -> list of PIL images.
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil ( images ):
    # Numpy batch in [0, 1], NHWC -> list of PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
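# Usage sketch for the helper above (assuming it keeps the name numpy_to_pil,
# as the call site in the first function suggests): a batch of float images in
# [0, 1], layout NHWC, becomes a list of PIL images.
import numpy as np

batch = np.random.rand(2, 16, 16, 3)      # 2 RGB images, values in [0, 1]
pil_list = numpy_to_pil(batch)
print(len(pil_list), pil_list[0].size)    # -> 2 (16, 16)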
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''marian'''
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self ,SCREAMING_SNAKE_CASE__=5_81_01 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=5_81_00 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=5_81_00 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=True ,**SCREAMING_SNAKE_CASE__ ,) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = decoder_vocab_size or vocab_size
__SCREAMING_SNAKE_CASE :Tuple = max_position_embeddings
__SCREAMING_SNAKE_CASE :List[str] = d_model
__SCREAMING_SNAKE_CASE :Optional[Any] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :List[Any] = encoder_layers
__SCREAMING_SNAKE_CASE :Union[str, Any] = encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :List[Any] = decoder_layers
__SCREAMING_SNAKE_CASE :Optional[int] = decoder_attention_heads
__SCREAMING_SNAKE_CASE :Dict = dropout
__SCREAMING_SNAKE_CASE :Dict = attention_dropout
__SCREAMING_SNAKE_CASE :Optional[Any] = activation_dropout
__SCREAMING_SNAKE_CASE :Any = activation_function
__SCREAMING_SNAKE_CASE :List[Any] = init_std
__SCREAMING_SNAKE_CASE :List[str] = encoder_layerdrop
__SCREAMING_SNAKE_CASE :Optional[Any] = decoder_layerdrop
__SCREAMING_SNAKE_CASE :Tuple = use_cache
__SCREAMING_SNAKE_CASE :Dict = encoder_layers
__SCREAMING_SNAKE_CASE :int = scale_embedding # scale factor will be sqrt(d_model) if True
__SCREAMING_SNAKE_CASE :Dict = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,forced_eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
class _SCREAMING_SNAKE_CASE( OnnxSeq2SeqConfigWithPast ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Dict = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE :Optional[Any] = {0: '''batch'''}
__SCREAMING_SNAKE_CASE :Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE :List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE :int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Tuple = super().outputs
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = super(SCREAMING_SNAKE_CASE__ ,self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE :Optional[Any] = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE :List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE :int = dict(**SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE :Union[str, Any] = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = self.num_attention_heads
__SCREAMING_SNAKE_CASE :int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :List[str] = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE :str = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Optional[int] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = self.num_layers
__SCREAMING_SNAKE_CASE :Optional[int] = min(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = max(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) - min_num_layers
__SCREAMING_SNAKE_CASE :List[str] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE :List[str] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE :List[Any] = seqlen + 2
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = self.num_layers
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[str] = self.num_attention_heads
__SCREAMING_SNAKE_CASE :str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Tuple = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE :int = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :Dict = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
]
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE :Tuple = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE :Dict = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE :Dict = dict(tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :Any = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :List[str] = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :Dict = super(SCREAMING_SNAKE_CASE__ ,self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self ) -> float:
"""simple docstring"""
return 1E-4
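# Hedged sketch of the past_key_values geometry the dummy-input generators
# above construct: one (key, value) tensor pair per layer, each shaped
# (batch, num_heads, past_sequence_length, head_dim). The numbers here are
# illustrative, not taken from any real checkpoint.
import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 4, 7, 16, 3
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),
        torch.zeros(batch, num_heads, past_len, head_dim),
    )
    for _ in range(num_layers)
]
print(len(past_key_values), tuple(past_key_values[0][0].shape))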
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ : Any = '''xmod'''
def __init__( self ,SCREAMING_SNAKE_CASE__=3_05_22 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__="absolute" ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=("en_XX",) ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE :List[Any] = hidden_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = intermediate_size
__SCREAMING_SNAKE_CASE :Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE :Optional[Any] = position_embedding_type
__SCREAMING_SNAKE_CASE :Any = use_cache
__SCREAMING_SNAKE_CASE :List[str] = classifier_dropout
__SCREAMING_SNAKE_CASE :Any = pre_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reduction_factor
__SCREAMING_SNAKE_CASE :Dict = adapter_layer_norm
__SCREAMING_SNAKE_CASE :Dict = adapter_reuse_layer_norm
__SCREAMING_SNAKE_CASE :Tuple = ln_before_adapter
__SCREAMING_SNAKE_CASE :Any = list(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = default_language
class _SCREAMING_SNAKE_CASE( OnnxConfig ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
    comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def __A ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False ) -> Any:
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
@staticmethod
def __A ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _A ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Optional[int] = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = object_detector(examples[0] , threshold=0.0 )
__UpperCAmelCase : Tuple = len(__UpperCAmelCase )
self.assertGreater(__UpperCAmelCase , 0 )
self.assertEqual(
__UpperCAmelCase , [
{
"""score""": ANY(__UpperCAmelCase ),
"""label""": ANY(__UpperCAmelCase ),
"""box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )},
}
for i in range(__UpperCAmelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__UpperCAmelCase : Optional[int] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
__UpperCAmelCase : str = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : List[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
__UpperCAmelCase : Any = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
@require_torch
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = 0.2
__UpperCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : Optional[int] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
__UpperCAmelCase : List[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__UpperCAmelCase , )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
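# Sketch of the post-filtering behaviour the tests above exercise: the
# pipeline's detections are score-sorted, cut by `threshold`, and optionally
# truncated to `top_k`. The detections below are made up for illustration.
detections = [
    {"score": 0.72, "label": "cat"},
    {"score": 0.65, "label": "remote"},
    {"score": 0.25, "label": "couch"},
]
threshold, top_k = 0.3, 2
kept = sorted(
    (d for d in detections if d["score"] >= threshold),
    key=lambda d: d["score"],
    reverse=True,
)[:top_k]
print(kept)  # the two highest-scoring detections above the threshold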
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _UpperCAmelCase ( method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
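# Hypothetical usage of the decorator above: wrapping a forward-like method so
# that an attached accelerate hook (if any) runs first. With no _hf_hook on
# the instance, the wrapped call behaves like a plain call.
class Dummy:
    @_UpperCAmelCase
    def forward(self, x):
        return x * 2

print(Dummy().forward(3))  # -> 6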
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCAmelCase__ = logging.getLogger(__name__)
class lowerCAmelCase__ ( PretrainedConfig ):
__a = """masked_bert"""
def __init__( self : Union[str, Any] , _lowerCamelCase : Any=30522 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : str=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="topK" , _lowerCamelCase : Optional[Any]="constant" , _lowerCamelCase : Optional[Any]=0.0 , **_lowerCamelCase : str , ):
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = pruning_method
_snake_case = mask_init
_snake_case = mask_scale
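# Illustrative sketch of the "topK" pruning idea the config above
# parameterizes (this is not the repo's implementation): keep the k
# highest-scoring weights of a matrix and zero out the rest.
import torch

scores = torch.randn(4, 4)
k = 8  # keep 8 of the 16 entries
threshold = scores.flatten().kthvalue(scores.numel() - k).values
mask = (scores > threshold).float()
print(int(mask.sum().item()))  # -> 8 (barring exact ties, unlikely with randn)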
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class snake_case ( unittest.TestCase ):
    def UpperCAmelCase__ ( self) ->Optional[int]:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/"))
        check_copies.TRANSFORMERS_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/transformers/models/bert/modeling_bert.py") , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , )
    def UpperCAmelCase__ ( self) ->int:
        check_copies.TRANSFORMERS_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Dict:
a_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
a_ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
a_ = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_19)
a_ = black.format_str(__UpperCAmelCase , mode=__UpperCAmelCase)
a_ = os.path.join(self.transformer_dir , "new_code.py")
with open(__UpperCAmelCase , "w" , newline="\n") as f:
f.write(__UpperCAmelCase)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__UpperCAmelCase)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=__UpperCAmelCase)
with open(__UpperCAmelCase , "r") as f:
self.assertTrue(f.read() , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __UpperCAmelCase) , )
# Copy consistency with a really long name
a_ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("Bert" , __UpperCAmelCase , __UpperCAmelCase) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __UpperCAmelCase , overwrite_result=re.sub("Bert" , "TestModel" , __UpperCAmelCase) , )
def UpperCAmelCase__ ( self) ->int:
a_ = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
a_ , a_ = check_copies.convert_to_localized_md(
__UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"])
self.assertFalse(__UpperCAmelCase)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
a_ , a_ = check_copies.convert_to_localized_md(
__UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__UpperCAmelCase)
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
a_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
a_ , a_ = check_copies.convert_to_localized_md(
__UpperCAmelCase , __UpperCAmelCase , localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
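# Single-process sketch of what pad_across_processes does along dim 0: grow
# the tensor to a common length, zero-padding after the data by default or
# before it when pad_first=True.
import torch

def pad_dim0(tensor, target_len, pad_first=False):
    pad = torch.zeros(target_len - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    parts = [pad, tensor] if pad_first else [tensor, pad]
    return torch.cat(parts, dim=0)

t = torch.ones(2, 3)
print(tuple(pad_dim0(t, 4).shape))              # -> (4, 3)
print(pad_dim0(t, 4, pad_first=True)[0].sum())  # -> tensor(0.)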
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ):
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ):
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class ClassificationFunction ( ExplicitEnum ):
    """simple docstring"""
    SIGMOID = '''sigmoid'''
    SOFTMAX = '''softmax'''
    NONE = '''none'''
@add_end_docstrings(
PIPELINE_INIT_ARGS , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class __lowercase ( Pipeline ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Tuple = ClassificationFunction.NONE
def __init__( self : Union[str, Any] , **lowerCAmelCase__ : int):
super().__init__(**lowerCAmelCase__)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[str]="" , **lowerCAmelCase__ : Union[str, Any]):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE_: int = tokenizer_kwargs
SCREAMING_SNAKE_CASE_: Dict = {}
if hasattr(self.model.config , "return_all_scores") and return_all_scores is None:
SCREAMING_SNAKE_CASE_: List[str] = self.model.config.return_all_scores
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) or top_k is None:
SCREAMING_SNAKE_CASE_: Any = top_k
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , lowerCAmelCase__ , )
if return_all_scores:
SCREAMING_SNAKE_CASE_: int = None
else:
SCREAMING_SNAKE_CASE_: str = 1
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE_: List[Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : int , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE_: Tuple = "top_k" not in kwargs
if isinstance(args[0] , lowerCAmelCase__) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.framework
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
return self.tokenizer(**lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) == 1 and isinstance(inputs[0] , lowerCAmelCase__) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.")
return self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any]):
return self.model(**lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Optional[Any]=True):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE_: Optional[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply") and function_to_apply is None:
SCREAMING_SNAKE_CASE_: Tuple = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE_: List[str] = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE_: List[Any] = model_outputs["logits"][0]
SCREAMING_SNAKE_CASE_: Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE_: List[str] = sigmoid(lowerCAmelCase__)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE_: Tuple = softmax(lowerCAmelCase__)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE_: List[str] = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}")
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE_: Dict = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(lowerCAmelCase__)
]
if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] , reverse=True)
if top_k is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] = dict_scores[:top_k]
return dict_scores
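# Numeric sketch of the postprocessing above: raw logits become probabilities
# (softmax here), get paired with labels, and are sorted by score. The
# id2label mapping below is hypothetical.
import numpy as np

logits = np.array([2.0, 0.5, -1.0])
probs = np.exp(logits - logits.max())
probs /= probs.sum()
id2label = {0: "POSITIVE", 1: "NEUTRAL", 2: "NEGATIVE"}
dict_scores = sorted(
    ({"label": id2label[i], "score": float(p)} for i, p in enumerate(probs)),
    key=lambda d: d["score"],
    reverse=True,
)
print(dict_scores[0])  # -> {'label': 'POSITIVE', 'score': ...}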
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def __lowerCamelCase ( ) -> None:
    """simple docstring"""
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    actual_mst = kruskal(num_nodes , edges )
    expected_mst = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(actual_mst ) == sorted(expected_mst )
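# Minimal Kruskal sketch with path-compressing union-find, matching the
# [u, v, weight] edge format used in the test above; this is not the
# graphs.minimum_spanning_tree_kruskal implementation itself.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two components, so keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst

print(kruskal_sketch(4, [[0, 1, 4], [1, 2, 2], [0, 2, 1], [2, 3, 3]]))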
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
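# Usage note (illustrative): with this lazy-module pattern, importing the package is cheap;
# the heavy submodules are only imported on first attribute access, e.g.:
#
#   from transformers.models.fnet import FNetConfig  # resolves configuration_fnet lazily
#   config = FNetConfig()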
| 85
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI (entailment) model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
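# A rough usage sketch (assuming the transformers agents/tools runtime; the exact entry
# points and call convention may differ between versions):
#
#   tool = TextClassificationTool()
#   label = tool("This movie was fantastic", labels=["positive", "negative"])
#   print(label)  # e.g. "positive"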
| 85
| 1
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on one shard of the dataset and save the predictions to save_dir."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank partial results into one list, sorted by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Poll save_dir until every rank has written its rank_*.json shard, or time out."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
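    # e.g. (illustrative invocation; exact flags depend on your data layout and GPU count):
    #   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #     --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir xsum_gen --fp16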
run_generate()
| 88
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id)
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 322
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) Blenderbot tokenizer, derived from the GPT-2 BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        """`str`: Mask token. Logs an error if used while not having been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
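# Rough usage sketch of the conversation path above (illustrative; the Conversation API
# has since been deprecated in newer transformers releases):
#
#   from transformers import BlenderbotTokenizerFast
#   from transformers.pipelines.conversational import Conversation
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   conv = Conversation("Hello, how are you?")
#   input_ids = tok._build_conversation_input_ids(conv)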
| 350
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 245
| 0
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_b = pipe(**inputs ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_c = pipe(**inputs ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_d = pipe(**inputs ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowercase ( self : Any ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Any ):
'''simple docstring'''
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,safety_checker=None ,controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt ,image ,control_image=control_image ,generator=generator ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9E-2
| 271
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer , weight , bias=None ):
    """simple docstring"""
    # set one layer's weight (and optional bias) after checking shapes
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
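# Illustrative sanity check for set_param (hypothetical tensors, not part of the conversion flow):
#   layer = nn.Linear(4, 4)
#   set_param(layer, torch.ones(4, 4), torch.zeros(4))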
def set_layer_weights_in_torch_lsh(weights , torch_layer , hidden_size ):
    """simple docstring"""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights , torch_layer , hidden_size ):
    """simple docstring"""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights , torch_block , hidden_size ):
    """simple docstring"""
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_b_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_b_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_b_weight ) , torch.tensor(layer_norm_b_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights , torch_model , hidden_size ):
    """simple docstring"""
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
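    # Example invocation (placeholder paths, for illustration only):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl \
    #       --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin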
| 271
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def write_basic_config ( mixed_precision : str = "no" , save_location : str = default_json_config_file , use_xpu : bool = False ):
    """simple docstring"""
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        config['num_processes'] = 1
        config['use_cpu'] = True
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
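# Usage sketch (illustrative values): write a bf16 config to a custom location.
#   write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")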
def default_command_parser ( parser , parents ):
    """simple docstring"""
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command ( args ):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
| 274
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage ( image : Image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def _UpperCamelCase ( self , model , tokenizer , processor ):
        '''simple docstring'''
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def _UpperCamelCase ( self , depth_estimator , examples ):
        '''simple docstring'''
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 274
| 1
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( DeformableDetrImageProcessor ):
    def __init__( self, *args, **kwargs ) -> None:
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 119
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "canine"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_6384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0XE000, eos_token_id=0XE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=1_6384, local_transformer_stride=128, **kwargs, ) -> int:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
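# Usage sketch (illustrative overrides; the class name above is the file's own alias):
#   config = lowerCAmelCase_(hidden_size=256, num_hidden_layers=4)
#   assert config.model_type == "canine"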
| 119
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp ( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
        code = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
def __UpperCAmelCase ( self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''multi''' , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
        code = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]
@classmethod
    def setUpClass ( cls ):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
        cls.pad_token_id = 1
        return cls
def __UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def __UpperCAmelCase ( self ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def __UpperCAmelCase ( self ):
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __UpperCAmelCase ( self ):
        src_text = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
        self.assertIsInstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
def __UpperCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def __UpperCAmelCase ( self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def __UpperCAmelCase ( self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __UpperCAmelCase ( self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __UpperCAmelCase ( self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''' )
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCAmelCase ( self ):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 11
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing ( self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing ( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload ( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device ( self ):
        if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 11
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = """google/mobilebert-uncased"""
    def setUp ( self : Optional[int] ):
        super().setUp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts ( self : Any , tokenizer : List[str] ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def lowercase ( self : Any ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def lowercase ( self : Optional[Any] ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def lowercase ( self : Any ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def lowercase ( self : int ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def lowercase ( self : Tuple ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def lowercase ( self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def lowercase ( self : Tuple ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def lowercase ( self : str ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def lowercase ( self : Optional[Any] ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def lowercase ( self : str ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def lowercase ( self : Dict ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def lowercase ( self : Dict ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowercase ( self : str ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowercase ( self : Any ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowercase ( self : int ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def lowercase ( self : Dict ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def lowercase ( self : Optional[Any] ):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowercase ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    text , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def lowercase ( self : List[str] ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 288
|
"""simple docstring"""
def bubble_sort ( list_data: list , length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
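    # Illustrative usage (example values; not part of the original doctests):
    print(bubble_sort([5, 1, 4, 2, 8]))  # -> [1, 2, 4, 5, 8]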
| 288
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_megatron_bert"""] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( AbstractDatasetReader ):
    """simple docstring"""
    def __init__( self ,df ,split = None ,features = None ,streaming = True ,cache_dir = None ,keep_in_memory = False ,working_dir = None ,load_from_cache_file = True ,file_format = "arrow" ,**kwargs ,) -> str:
        super().__init__(
            split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,**kwargs ,)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df ,features=features ,cache_dir=cache_dir ,working_dir=working_dir ,**kwargs ,)
    def read ( self ) -> int:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode ,file_format=self._file_format ,)
        return self.builder.as_dataset(split=self.split )
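# Usage sketch (illustrative; assumes an active SparkSession and a DataFrame `df`;
# `lowercase` is this file's alias for the reader class above):
#   reader = lowercase(df, cache_dir="/tmp/hf_cache")
#   ds = reader.read()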
| 349
| 0
|
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    @cached_property
    def teacher_config ( self : Union[str, Any] ) -> int:
        return AutoConfig.from_pretrained(TINY_BART )
    def lowercase__ ( self : Optional[int] ) -> List[str]:
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def lowercase__ ( self : Tuple ) -> Any:
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )
    def lowercase__ ( self : Tuple ) -> Tuple:
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def lowercase__ ( self : List[Any] ) -> Any:
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def lowercase__ ( self : Any ) -> List[Any]:
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
| 70
|
"""simple docstring"""
def _print_dist ( dist , v ) -> List[Any]:
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()
def floyd_warshall ( graph , v ) -> Optional[Any]:
    dist = [[float('''inf''' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
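# Programmatic usage sketch (hypothetical 3-vertex graph; bypasses the interactive prompts):
#   INF = float("inf")
#   g = [[0.0, 2.0, INF], [1.0, 0.0, INF], [INF, INF, 0.0]]
#   dist, _ = floyd_warshall(g, 3)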
| 16
| 0
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCamelCase ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self : int , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward ( self : List[Any] , encoder_input_tokens : Tuple , encoder_inputs_mask : List[str] ) -> Optional[int]:
        """simple docstring"""
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
| 358
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowerCamelCase :
    def __init__( self : str , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ : int = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Tuple = self.get_config()
lowerCAmelCase__ : Dict = config.num_attention_heads
lowerCAmelCase__ : Dict = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Optional[int] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
lowerCAmelCase__ : List[Any] = result.last_hidden_state
lowerCAmelCase__ : Any = result.past_key_values
lowerCAmelCase__ : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase , use_cache=UpperCamelCase )
lowerCAmelCase__ : Any = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : List[str] = model(UpperCamelCase )["""last_hidden_state"""]
lowerCAmelCase__ : Any = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int , ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
lowerCAmelCase__ : Dict = model(**UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
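

# One possible way to run this suite locally (file path assumed from the usual transformers
# repository layout):
#
#   python -m pytest tests/models/umt5/test_modeling_umt5.py -v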

# =============================================================================
# transformers: dependency version pin table (autogenerated; update the `_deps`
# dict in setup.py and regenerate rather than editing by hand)
# =============================================================================
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
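
# Sketch of how such a pin table is typically consumed: transformers checks installed versions
# against these specifiers via `require_version` (exact call sites assumed here):
#
#   from transformers.utils.versions import require_version
#   require_version(deps["tokenizers"])  # raises if the installed version violates the pin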

# =============================================================================
# transformers research project: VQGAN-CLIP image editing via CLIP-guided
# latent optimization
# =============================================================================
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP tokenizer together with a torchvision-based image pipeline so that
    gradients can flow through image preprocessing (the stock CLIP processor converts
    to PIL images, which breaks the autograd graph).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate a VQGAN-CLIP editor; pass a custom VQGAN via `vqgan` or load one from disk."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the PNGs saved during generation into a GIF."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames a bit longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the current latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image from the given prompts, optionally starting from an existing image."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
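

# A minimal usage sketch (assumes the project-local `loaders`, `img_processing` and `utils`
# modules are importable and that a VQGAN config/checkpoint exist at the given paths):
#
#   editor = VQGAN_CLIP(vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt", iterations=50)
#   editor.generate(pos_prompts="a smiling face", neg_prompts="blurry:0.5", image_path="face.png")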

# =============================================================================
# diffusers: DiT (Diffusion Transformer) class-conditional image generation
# pipeline
# =============================================================================
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Pipeline for class-conditional image generation using a Transformer backbone (DiT) instead of a UNet.
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable ImageNet label strings to the class ids expected by the transformer."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
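

# A minimal usage sketch (checkpoint name assumed; a GPU is assumed for reasonable speed):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "golden retriever"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images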

# =============================================================================
# Molecular chemistry conversions: molarity to normality and ideal gas law
# helpers
# =============================================================================
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor (volume in litres)."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
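
# Worked example (illustrative numbers): 0.1 mol of an ideal gas at 300 K in a 2 L vessel:
#   moles_to_pressure(volume=2, moles=0.1, temperature=300)
#   # = round(0.1 * 0.0821 * 300 / 2) = round(1.2315) = 1 atm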

# =============================================================================
# transformers: MobileViT image processor tests
# =============================================================================
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
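

# Direct usage sketch outside the test harness (checkpoint name assumed):
#
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values  # (1, 3, H, W)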

# =============================================================================
# transformers: CPM tokenizer (XLNet-style SentencePiece tokenizer with jieba
# pre-tokenization)
# =============================================================================
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the jieba segmentation tool, then SentencePiece. Used by CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting trailing commas off digit pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
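

# Usage sketch (downloads the CPM SentencePiece model; `jieba` must be installed):
#
#   from transformers import CpmTokenizer
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("今天天气真好!").input_ids
#   text = tokenizer.decode(ids)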

# =============================================================================
# transformers research project: seq2seq distillation, make_student.py
# =============================================================================
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a ( __a , __a = "student" , __a = None , __a = None , __a=False , __a=None , __a=None , **__a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__a , __a ):
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience
UpperCamelCase__ :int = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval()
else:
assert isinstance(__a , __a ), f'''teacher must be a model or string got type {type(__a )}'''
UpperCamelCase__ :Union[str, Any] = teacher.config.to_diff_dict()
try:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCamelCase__ :Optional[Any] = teacher_e
if d is None:
UpperCamelCase__ :List[str] = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
UpperCamelCase__ , UpperCamelCase__ :int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCamelCase__ , UpperCamelCase__ :Dict = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCamelCase__ :Optional[int] = teacher_e
if d is None:
UpperCamelCase__ :Any = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__a )
# Copy weights
UpperCamelCase__ :List[Any] = teacher.config_class(**__a )
UpperCamelCase__ :List[Any] = AutoModelForSeqaSeqLM.from_config(__a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCamelCase__ :Dict = student.load_state_dict(teacher.state_dict() , strict=__a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCamelCase__ , UpperCamelCase__ :Dict = list(range(__a ) ), list(range(__a ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(__a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
if d_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
try:
if hasattr(
__a , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __a )
copy_layers(teacher.decoder.block , student.decoder.block , __a )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
UpperCamelCase__ :Tuple = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
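
# A possible CLI invocation via fire (flag names follow the function signature above; the
# teacher checkpoint is just an example):
#
#   python make_student.py facebook/bart-large-cnn --save_path student_bart_6_3 --e 6 --d 3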

# =============================================================================
# diffusers: command line interface entry point (diffusers-cli)
# =============================================================================
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
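
# Once the package is installed, the same entry point is exposed as a console script; the only
# subcommand registered above is `env`:
#
#   diffusers-cli env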