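# =============================================================================
# Test suite for the PyTorch ViT-MSN (Masked Siamese Networks) model; the
# layout matches tests/models/vit_msn/test_modeling_vit_msn.py in the
# transformers repository. Identifier names not referenced elsewhere in the
# file are best-effort reconstructions.
# =============================================================================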
"""Testing suite for the PyTorch ViT-MSN model."""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify the integration results on a COCO sample image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
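# =============================================================================
# Fast and slow tests for the Kandinsky 2.2 ControlNet text-to-image pipeline
# from diffusers. The fast tests assemble tiny dummy UNet/VQModel components;
# the slow test runs the published fp16 checkpoints on GPU and compares the
# output against a reference image.
# =============================================================================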
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False  # flag name inferred; only the boolean value survived the dump

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
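# =============================================================================
# Tests for the unconditional DDIM image-generation pipeline from diffusers:
# a fast CPU test with a tiny UNet2DModel and slow GPU tests against the
# google/ddpm-cifar10-32 and google/ddpm-ema-bedroom-256 checkpoints.
# =============================================================================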
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : List[Any] = DDIMPipeline
__magic_name__ : str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__magic_name__ : Any = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
__magic_name__ : Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
__magic_name__ : Optional[int] = False
def _UpperCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
A__ : Optional[int] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
A__ : int =DDIMScheduler()
A__ : Union[str, Any] ={"unet": unet, "scheduler": scheduler}
return components
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int=0 ):
if str(UpperCamelCase__ ).startswith("mps" ):
A__ : Dict =torch.manual_seed(UpperCamelCase__ )
else:
A__ : Any =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ : Optional[int] ={
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _UpperCAmelCase ( self : List[Any] ):
A__ : List[str] ="cpu"
A__ : List[str] =self.get_dummy_components()
A__ : Union[str, Any] =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[Any] =self.get_dummy_inputs(UpperCamelCase__ )
A__ : List[Any] =pipe(**UpperCamelCase__ ).images
A__ : Union[str, Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A__ : Tuple =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
A__ : int =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1E-3 )
def _UpperCAmelCase ( self : Tuple ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _UpperCAmelCase ( self : Optional[Any] ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _UpperCAmelCase ( self : int ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _UpperCAmelCase ( self : Optional[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def _UpperCAmelCase ( self : int ):
A__ : Optional[Any] ="google/ddpm-cifar10-32"
A__ : List[Any] =UNetaDModel.from_pretrained(UpperCamelCase__ )
A__ : Optional[int] =DDIMScheduler()
A__ : Optional[int] =DDIMPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
ddim.to(UpperCamelCase__ )
ddim.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[int] =torch.manual_seed(0 )
A__ : str =ddim(generator=UpperCamelCase__ , eta=0.0 , output_type="numpy" ).images
A__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ : Dict =np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase ( self : int ):
A__ : Dict ="google/ddpm-ema-bedroom-256"
A__ : List[str] =UNetaDModel.from_pretrained(UpperCamelCase__ )
A__ : Optional[Any] =DDIMScheduler.from_pretrained(UpperCamelCase__ )
A__ : Optional[Any] =DDIMPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
ddpm.to(UpperCamelCase__ )
ddpm.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Dict =torch.manual_seed(0 )
A__ : int =ddpm(generator=UpperCamelCase__ , output_type="numpy" ).images
A__ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A__ : int =np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
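# =============================================================================
# Package __init__ for the RemBERT model family in transformers. Submodules
# are registered in `_import_structure` and loaded lazily via `_LazyModule`;
# optional backends (sentencepiece, tokenizers, torch, TF) are only wired in
# when available.
# =============================================================================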
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
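# =============================================================================
# Package __init__ for GPT-BigCode, following the same `_LazyModule` pattern
# as the RemBERT init above, with torch as the only optional backend.
# =============================================================================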
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
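# =============================================================================
# Environment-dump helper (the `print_env`-style utility used for transformers
# bug reports): prints Python, torch/CUDA/NCCL, DeepSpeed, and TensorFlow
# version information, tolerating missing optional packages.
# =============================================================================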
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# silence TensorFlow's C++ logging; the assignment target was lost in the
# dump and is inferred from the `import os` above and the string value "3"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
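# =============================================================================
# Streamlit demo for long-form question answering on ELI5: retrieves Wikipedia
# support passages with either a dense (faiss) or sparse (Elasticsearch)
# index, then generates an answer with a BART sequence-to-sequence model.
# Several module-level names below are inferred from how they are used later
# in the script.
# =============================================================================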
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
lowercase__ = "bart"
lowercase__ = True
@st.cache(allow_output_mutation=_snake_case )
def __magic_name__ ( ):
if LOAD_DENSE_INDEX:
__a : str = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__a : Optional[Any] = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__a : Optional[int] = qar_model.eval()
else:
__a , __a : Tuple = (None, None)
if MODEL_TYPE == "bart":
__a : Optional[int] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__a : Dict = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__a : Tuple = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__a : Optional[Any] = sas_model.eval()
else:
__a , __a : str = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_snake_case )
def __magic_name__ ( ):
if LOAD_DENSE_INDEX:
__a : Optional[Any] = faiss.StandardGpuResources()
__a : Tuple = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__a : Union[str, Any] = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_2_8) , )
__a : List[str] = faiss.IndexFlatIP(1_2_8 )
__a : Dict = faiss.index_cpu_to_gpu(_snake_case , 1 , _snake_case )
wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU
else:
__a , __a : int = (None, None)
__a : Any = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_snake_case )
def __magic_name__ ( ):
__a : Optional[int] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__a : Optional[int] = elia["""train_eli5"""]
__a : Optional[int] = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_2_8) )
__a : Optional[int] = faiss.IndexFlatIP(1_2_8 )
eli5_train_q_index.add(_snake_case )
return (elia_train, eli5_train_q_index)
lowercase__ , lowercase__ , lowercase__ = load_indexes()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = load_models()
lowercase__ , lowercase__ = load_train_data()
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]=1_0 ):
__a : int = embed_questions_for_retrieval([question] , _snake_case , _snake_case )
__a , __a : Optional[int] = eli5_train_q_index.search(_snake_case , _snake_case )
__a : Optional[int] = [elia_train[int(_snake_case )] for i in I[0]]
return nn_examples
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any]="wiki40b" , _lowerCamelCase : Dict="dense" , _lowerCamelCase : Dict=1_0 ):
if source == "none":
__a , __a : int = (""" <P> """.join(["""""" for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
__a , __a : List[str] = query_qa_dense_index(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
__a , __a : List[str] = query_es_index(
_snake_case , _snake_case , index_name="""english_wiki40b_snippets_100w""" , n_results=_snake_case , )
__a : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__a : Union[str, Any] = """question: {} context: {}""".format(_snake_case , _snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]=6_4 , _lowerCamelCase : int=2_5_6 , _lowerCamelCase : str=False , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Tuple=0.95 , _lowerCamelCase : int=0.8 ):
with torch.no_grad():
__a : Dict = qa_sas_generate(
_snake_case , _snake_case , _snake_case , num_answers=1 , num_beams=_snake_case , min_len=_snake_case , max_len=_snake_case , do_sample=_snake_case , temp=_snake_case , top_p=_snake_case , top_k=_snake_case , max_input_length=1_0_2_4 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
lowercase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
lowercase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowercase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
lowercase__ = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
lowercase__ = st.sidebar.checkbox("Demo options")
if demo_options:
lowercase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
lowercase__ = action_list.index(action_st)
lowercase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
lowercase__ = show_type == "Show full text of passages"
else:
lowercase__ = 3
lowercase__ = True
lowercase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
lowercase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
lowercase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
lowercase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
lowercase__ = "wiki40b"
lowercase__ = "dense"
lowercase__ = "beam"
lowercase__ = 2
lowercase__ = 64
lowercase__ = 256
lowercase__ = None
lowercase__ = None
lowercase__ = st.sidebar.checkbox("Generation options")
if generate_options:
lowercase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
lowercase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
lowercase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowercase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowercase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowercase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
lowercase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
lowercase__ = None
# start main text
lowercase__ = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
lowercase__ = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowercase__ = st.text_input("Enter your question here:", "")
else:
lowercase__ = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
lowercase__ , lowercase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
lowercase__ , lowercase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
lowercase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowercase__ = support_list[:10]
lowercase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
lowercase__ , lowercase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowercase__ , lowercase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
lowercase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
lowercase__ = res[1].strip()
if sec_titles == "":
lowercase__ = "[{}]({})".format(res[0], wiki_url)
else:
lowercase__ = sec_titles.split(" & ")
lowercase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
lowercase__ = find_nearest_training(question)
lowercase__ = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
lowercase__ = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
lowercase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
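# =============================================================================
# Distribution-output utilities for probabilistic time-series forecasting:
# linear heads project network features to distribution parameters, a
# `squareplus` mapping keeps scale parameters positive, and predictions can be
# rescaled through an affine transform (Student-T, Normal, Negative Binomial).
# =============================================================================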
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ):
'''simple docstring'''
__a : Any = 1.0 if scale is None else scale
__a : str = 0.0 if loc is None else loc
super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : str = args_dim
__a : List[Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
__a : Dict = domain_map
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = [proj(_lowercase ) for proj in self.proj]
return self.domain_map(*_lowercase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = function
def lowerCAmelCase__(self , _lowercase , *_lowercase ):
'''simple docstring'''
return self.function(_lowercase , *_lowercase )
class SCREAMING_SNAKE_CASE__ :
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__(self , _lowercase = 1 ):
'''simple docstring'''
__a : Optional[int] = dim
__a : str = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*_lowercase )
else:
return Independent(self.distribution_class(*_lowercase ) , 1 )
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ):
'''simple docstring'''
__a : Tuple = self._base_distribution(_lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return len(self.event_shape )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 0.0
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return ParameterProjection(
in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCAmelCase__(self , *_lowercase ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def lowerCAmelCase__(_lowercase ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
__a : Optional[Any] = 2.0 + cls.squareplus(_lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase ):
'''simple docstring'''
__a : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase ):
'''simple docstring'''
__a : Union[str, Any] = cls.squareplus(_lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a , __a : Optional[Any] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowercase , logits=_lowercase )
else:
return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ):
'''simple docstring'''
__a , __a : List[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
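# =============================================================================
# Tokenizer tests for the original OpenAI GPT BPE tokenizer, covering both the
# slow Python and fast Rust implementations, built on a tiny handcrafted
# vocabulary and merges file.
# =============================================================================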
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Tuple = OpenAIGPTTokenizer
_lowerCAmelCase : Optional[Any] = OpenAIGPTTokenizerFast
_lowerCAmelCase : str = True
_lowerCAmelCase : Optional[int] = False
def _snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
snake_case_ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ : int = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(lowercase_ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
def _snake_case ( self : Dict , lowercase_ : List[Any] ):
return "lower newer", "lower newer"
def _snake_case ( self : str ):
snake_case_ : Optional[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
snake_case_ : int = '''lower'''
snake_case_ : Any = ['''low''', '''er</w>''']
snake_case_ : List[str] = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : Optional[int] = tokens + ['''<unk>''']
snake_case_ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def _snake_case ( self : Tuple , lowercase_ : Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
snake_case_ : Optional[Any] = '''This is a simple input'''
snake_case_ : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case_ : List[Any] = ('''This is a simple input''', '''This is a pair''')
snake_case_ : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
def _snake_case ( self : Any ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__):
pass
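# =============================================================================
# Speech feature extractor producing log-mel filterbank (MFSC) features with a
# Hamming window, pre-emphasis, and mean/variance normalization. The four-dot
# relative imports place it under transformers' deprecated models; the class
# name below (MCTCTFeatureExtractor) is inferred from that layout.
# =============================================================================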
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase__ : int = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : str = ["""input_features""", """attention_mask"""]
def __init__( self : Optional[int] , lowercase_ : str=80 , lowercase_ : Optional[int]=16000 , lowercase_ : List[Any]=0.0 , lowercase_ : int=10 , lowercase_ : Optional[int]=25 , lowercase_ : List[Any]="hamming_window" , lowercase_ : Tuple=3_27_68.0 , lowercase_ : Any=0.97 , lowercase_ : Dict=1.0 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=True , lowercase_ : Optional[Any]=False , **lowercase_ : str , ):
super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ )
snake_case_ : List[Any] = feature_size
snake_case_ : List[Any] = sampling_rate
snake_case_ : str = padding_value
snake_case_ : List[Any] = hop_length
snake_case_ : Dict = win_length
snake_case_ : Optional[int] = frame_signal_scale
snake_case_ : int = preemphasis_coeff
snake_case_ : Optional[int] = mel_floor
snake_case_ : List[str] = normalize_means
snake_case_ : Union[str, Any] = normalize_vars
snake_case_ : Optional[int] = win_function
snake_case_ : List[Any] = return_attention_mask
snake_case_ : Any = win_length * sampling_rate // 1000
snake_case_ : Union[str, Any] = hop_length * sampling_rate // 1000
snake_case_ : Dict = optimal_fft_length(self.sample_size )
snake_case_ : List[str] = (self.n_fft // 2) + 1
def _snake_case ( self : Union[str, Any] , lowercase_ : np.array ):
if self.win_function == "hamming_window":
snake_case_ : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase_ )
else:
snake_case_ : Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function )
snake_case_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
snake_case_ : str = spectrogram(
one_waveform * self.frame_signal_scale , window=lowercase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowercase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowercase_ , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any] ):
# make sure we normalize float32 arrays
if self.normalize_means:
snake_case_ : int = x[:input_length].mean(axis=0 )
snake_case_ : int = np.subtract(lowercase_ , lowercase_ )
if self.normalize_vars:
snake_case_ : Tuple = x[:input_length].std(axis=0 )
snake_case_ : Union[str, Any] = np.divide(lowercase_ , lowercase_ )
if input_length < x.shape[0]:
snake_case_ : Tuple = padding_value
# make sure array is in float32
snake_case_ : List[Any] = x.astype(np.floataa )
return x
def _snake_case ( self : Union[str, Any] , lowercase_ : List[np.ndarray] , lowercase_ : Optional[np.ndarray] = None ):
snake_case_ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowercase_ , lowercase_ , self.padding_value ) for x, n in zip(lowercase_ , lowercase_ )]
def __call__( self : Tuple , lowercase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[int] = None , **lowercase_ : Union[str, Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
snake_case_ : Dict = isinstance(lowercase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
snake_case_ : List[Any] = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ : int = [np.asarray(lowercase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray ):
snake_case_ : int = np.asarray(lowercase_ , dtype=np.floataa )
elif isinstance(lowercase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Dict = [raw_speech]
# extract fbank features
snake_case_ : Optional[int] = [self._extract_mfsc_features(lowercase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
snake_case_ : int = BatchFeature({'''input_features''': features} )
snake_case_ : Tuple = self.pad(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
# make sure list is in array format
snake_case_ : Dict = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , lowercase_ ):
snake_case_ : Tuple = [np.asarray(lowercase_ , dtype=np.floataa ) for feature in input_features]
snake_case_ : List[Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
snake_case_ : Tuple = [np.asarray(lowercase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
snake_case_ : str = (
np.array(lowercase_ , dtype=np.intaa )
if self._get_padding_strategies(lowercase_ , max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
snake_case_ : Any = self.normalize(
padded_inputs['''input_features'''] , attention_mask=lowercase_ )
if return_tensors is not None:
snake_case_ : Any = padded_inputs.convert_to_tensors(lowercase_ )
return padded_inputs
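# =============================================================================
# Package __init__ for the EnCodec neural audio codec, again using the
# `_import_structure` / `_LazyModule` pattern.
# =============================================================================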
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
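# =============================================================================
# Conversion script: loads a Pix2Struct T5X/Flax checkpoint, renames its
# parameters to the transformers layout, and saves a PyTorch model together
# with its processor.
# =============================================================================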
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }

    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use the large variant of the model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert the VQA variant of the model.')
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
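# Example invocation (paths are placeholders):
#   python convert_pix2struct_original_pytorch_checkpoint_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large   # only for the large variant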
# ---------------------------------------------------------------------------
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
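# Usage sketch: composing the top-level config from its two sub-configs.
#   text_cfg = Pix2StructTextConfig(num_layers=2)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()  # nests both sub-configs plus model_type "pix2struct"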
# ---------------------------------------------------------------------------
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
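# These tests are collected by pytest in the usual way, e.g. (path illustrative):
#   pytest tests/models/levit/test_image_processing_levit.py -k "test_call_pil"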
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
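# Usage sketch:
#   config = RealmConfig()  # BERT-base-like defaults
#   config = RealmConfig(num_candidates=4, reader_beam_size=3)  # smaller retrieval setup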
# ---------------------------------------------------------------------------
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
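# Usage sketch (deprecated API; instantiation emits the FutureWarning above and
# `TrainingArguments` is the suggested replacement):
#   args = SageMakerTrainingArguments(output_dir="./out")
#   args.device  # resolved by _setup_devices according to the SageMaker environment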
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
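# The optional-dependency guards above make imports degrade gracefully, e.g.
#   from transformers.models.fnet import FNetConfig  # always available
#   from transformers.models.fnet import FNetModel   # only importable if torch is installed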
# ---------------------------------------------------------------------------
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
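# Example (python-fire exposes minify's positional args on the CLI; the script
# name below is illustrative):
#   python minify_dataset.py ./data/full ./data/mini 100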
# ---------------------------------------------------------------------------
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # with even_batches enabled (the default), both processes see the same number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
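# These checks assume exactly two processes; a typical launch would be, e.g.
#   accelerate launch --num_processes 2 test_even_batches.py
# (script name illustrative.)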
# ---------------------------------------------------------------------------
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
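# Usage note: the shim behaves exactly like ImageGPTImageProcessor but warns, e.g.
#   feature_extractor = ImageGPTFeatureExtractor()  # emits the FutureWarning above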
# ---------------------------------------------------------------------------
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 0
|
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint to a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, "
                "xlnet, xlm, lxmert, rembert]"
            )
# ---------------------------------------------------------------------------
from copy import deepcopy


class FenwickTree:
    """A Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        """Initialize from an existing array, or as an all-zero tree of a given size."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from arr in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to the element at index in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at index to value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum over the prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum over the half-open range [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Return the element at index."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Return the largest index whose prefix sum does not exceed value, or -1 if none."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
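# Usage sketch:
#   ft = FenwickTree([1, 2, 3, 4, 5])
#   ft.prefix(3)    # 6, sum over indices [0, 3)
#   ft.query(1, 4)  # 9, sum over indices [1, 4)
#   ft.add(0, 10)   # point update: arr[0] += 10
#   ft.get(0)       # 11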
# ---------------------------------------------------------------------------
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the heap position of the parent of the node at `position`."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the heap position of the left child of the node at `position`."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the heap position of the right child of the node at `position`."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights, with decrease-key support."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep the position map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm and return the (distance map, MST parent map) pair."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
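if __name__ == "__main__":
    # A minimal usage sketch (added here; the data is hypothetical): build a
    # small undirected weighted graph and read the minimum spanning tree off
    # the parent map returned by prims_algo.
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    mst_dist, mst_parent = prims_algo(g)
    print(mst_parent)  # child -> parent map encoding the MST edges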
| 461
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
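# Sanity note for the helper above: after L2-normalisation, the matrix product of
# two embedding batches is their pairwise cosine similarity; e.g. (shapes are
# hypothetical) cosine_distance(torch.randn(4, 512), torch.randn(3, 512)) has
# shape (4, 3) with entries in [-1, 1].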
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
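# Minimal standalone sketch of the thresholding rule used in both passes above
# (hypothetical helper, plain Python, not part of the original class): a concept
# is flagged when its cosine similarity exceeds the learned per-concept
# threshold, optionally softened by `adjustment`.
def _flag_concepts(cos_sims: list[float], thresholds: list[float], adjustment: float = 0.0) -> list[int]:
    return [
        idx
        for idx, (sim, threshold) in enumerate(zip(cos_sims, thresholds))
        if round(sim - threshold + adjustment, 3) > 0
    ]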
| 461
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
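# A minimal usage sketch (commented out; assumes the Flax scheduler API as
# reconstructed above, which is worth verifying against your installed
# diffusers version):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       prev_sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)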
| 716
|
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
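# Note on the queue above: put() on an already-present item performs a lazy
# priority update -- it pops entries until the item is found, swaps in the new
# priority, and re-pushes everything. An update therefore costs O(n log n) in
# the worst case, but the heap never holds duplicate items.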
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos) -> float:
    # integer division by the time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos) -> int:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
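# The priority above is the weighted-A* key f_i(s) = g(s) + W1 * h_i(s), one key
# per heuristic i. W1 inflates the heuristic estimates; W2 (used in the search
# loop below) bounds how far an inadmissible queue's best key may exceed the
# anchor (consistent) queue's before the anchor is expanded instead.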
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 132
| 0
|
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand

SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 4
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
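# Note (added): with the lazy module installed in sys.modules, submodules such
# as modeling_deit are imported only when one of the names declared above is
# first accessed, keeping `import transformers` itself cheap.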
| 199
| 0
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
            return 0 if d0 > d1 else 1
        return 0
    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
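    # The rule above is plain competitive learning: w_j <- w_j + alpha * (x - w_j),
    # i.e. only the winning unit j is pulled toward the sample x by a step of size alpha.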
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 51
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all model tester classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all test classes (with non-empty `all_model_classes`) in a model test file."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes appearing in the test classes of a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Return the model tester class attached to a test class instance."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return all test classes in a model test file that use a given model class."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return all tester classes in a model test file that use a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Return a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Return a mapping from model classes to the test classes that use them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Return a mapping from model classes to the tester classes that use them."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make a (possibly nested) container of classes JSON-serializable by using class names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
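# A short usage sketch (hypothetical test file path, showing how the helpers
# above compose):
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_tester_mapping(test_file)))
#   print(to_json(get_test_to_tester_mapping(test_file)))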
| 51
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 467
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 467
| 1
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sort and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
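# Worked example (plain call, no I/O): patience_sort([5, 1, 4, 2, 3]) returns
# [1, 2, 3, 4, 5]. Each Stack holds a decreasing run, so reversed(stack) yields
# an ascending run and heapq.merge combines the k runs in O(n log k).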
| 702
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents) for a newline-separated corpus."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return idf = log10(n / df), or 1 + log10(n / (1 + df)) with smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine a term-frequency and an inverse-document-frequency score."""
    return round(tf * idf, 3)
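# A short worked sketch composing the four helpers (hypothetical corpus):
#
#   corpus = "the cat sat\nthe dog sat\nthe cat ran"
#   tf = term_frequency("cat", "the cat sat with the cat")   # -> 2
#   df, n = document_frequency("cat", corpus)                # -> (2, 3)
#   print(tf_idf(tf, inverse_document_frequency(df, n)))     # tf * log10(3/2)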
| 29
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=64 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = parent
__SCREAMING_SNAKE_CASE: Any = batch_size
__SCREAMING_SNAKE_CASE: Dict = seq_length
__SCREAMING_SNAKE_CASE: List[str] = is_training
__SCREAMING_SNAKE_CASE: Any = use_input_mask
__SCREAMING_SNAKE_CASE: Optional[int] = use_token_type_ids
__SCREAMING_SNAKE_CASE: Dict = use_labels
__SCREAMING_SNAKE_CASE: Any = vocab_size
__SCREAMING_SNAKE_CASE: List[Any] = hidden_size
__SCREAMING_SNAKE_CASE: str = embedding_size
__SCREAMING_SNAKE_CASE: Any = num_hidden_layers
__SCREAMING_SNAKE_CASE: Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE: List[str] = intermediate_size
__SCREAMING_SNAKE_CASE: Any = hidden_act
__SCREAMING_SNAKE_CASE: Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE: str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE: List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE: Optional[int] = type_vocab_size
__SCREAMING_SNAKE_CASE: Any = type_sequence_label_size
__SCREAMING_SNAKE_CASE: Dict = initializer_range
__SCREAMING_SNAKE_CASE: List[Any] = num_labels
__SCREAMING_SNAKE_CASE: List[str] = num_choices
__SCREAMING_SNAKE_CASE: Union[str, Any] = scope
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE: Any = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE: Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE: str = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE: Union[str, Any] = None
__SCREAMING_SNAKE_CASE: Optional[int] = None
__SCREAMING_SNAKE_CASE: Tuple = None
if self.use_labels:
__SCREAMING_SNAKE_CASE: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE: int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self ):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = MegatronBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: str = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = MegatronBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: int = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = MegatronBertForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: List[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = MegatronBertForNextSentencePrediction(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: str = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = MegatronBertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Tuple = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , next_sentence_label=_lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = MegatronBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Union[str, Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.num_labels
__SCREAMING_SNAKE_CASE: str = MegatronBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE: str = MegatronBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = self.num_choices
__SCREAMING_SNAKE_CASE: Any = MegatronBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE: Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE: Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE: Optional[int] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[int] = True
# test_resize_embeddings = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
def snake_case_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
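        # The checkpoint is evaluated in fp16 (model.half() above), so the expected
        # activations are compared element-wise with math.isclose() under TOLERANCE
        # rather than with exact equality.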
import pprint
import requests
lowerCamelCase__ = "https://zenquotes.io/api"
def __A() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __A() -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
lowerCamelCase__ = random_quotes()
pprint.pprint(response)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
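    # Replacing the entry in sys.modules with a _LazyModule defers the heavy torch
    # imports declared in _import_structure until one of the names is first accessed.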
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # e must be coprime with (p - 1) * (q - 1) so that a modular inverse
        # (the private exponent d) exists.
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
                 attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
                 multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
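# Usage sketch (illustrative, not part of the original file): the derived
# properties follow directly from the stored attributes.
#   config = FalconConfig()
#   assert config.head_dim == config.hidden_size // config.num_attention_heads
#   assert config.rotary == (not config.alibi)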
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing: step to the next bucket, wrapping around the table.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
return lst
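# Gnome sort behaves like insertion sort performed with adjacent swaps:
# O(n^2) comparisons in the worst case, O(n) on already-sorted input, O(1) extra space.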
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
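# Worked example (illustrative): converting 4 kilometers to meters uses
# exponents 3 and 0, so the factor is 10 ** (3 - 0) and
# length_conversion(4, "kilometer", "meter") == 4000.0.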
if __name__ == "__main__":
from doctest import testmod
testmod()
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args)
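# Usage sketch (illustrative): from a notebook cell,
#   notebook_launcher(training_function, args=(config,), num_processes=2)
# spawns workers with the "fork" start method, which is why the checks above
# require that CUDA has not been initialized in the notebook process beforehand.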
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500",
            accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128,
                 shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128,
                 rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True,
                 visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between the QA/sequence-classification tasks and the rest.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3,
                              image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))
        return inputs
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    # Standard ImageNet normalization statistics, as used by DETR-style preprocessing.
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    """Generates prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
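# Example (illustrative): solution() sums every prime below two million
# (Project Euler problem 10); for a small case, solution(10) == 2 + 3 + 5 + 7 == 17.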
if __name__ == "__main__":
print(F"""{solution() = }""")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = 0.9,
                 resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True,
                 crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255,
                 do_rescale: bool = True, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None,
               resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            # Scale the resize target up by 1 / crop_pct so that the subsequent center
            # crop keeps roughly crop_pct of the resized image.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def A__ ( self : List[str], __lowercase : ImageInput, __lowercase : bool = None, __lowercase : Dict[str, int] = None, __lowercase : int = None, __lowercase : PILImageResampling = None, __lowercase : bool = None, __lowercase : Dict[str, int] = None, __lowercase : bool = None, __lowercase : float = None, __lowercase : bool = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : ChannelDimension = ChannelDimension.FIRST, **__lowercase : Tuple, ):
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = crop_pct if crop_pct is not None else self.crop_pct
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(__lowercase, default_to_square=__lowercase )
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(__lowercase, param_name="crop_size" )
lowercase__ = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=__lowercase, size=__lowercase, crop_pct=__lowercase, resample=__lowercase ) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=__lowercase, size=__lowercase ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=__lowercase, scale=__lowercase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=__lowercase, mean=__lowercase, std=__lowercase ) for image in images]
lowercase__ = [to_channel_dimension_format(__lowercase, __lowercase ) for image in images]
lowercase__ = {"pixel_values": images}
return BatchFeature(data=__lowercase, tensor_type=__lowercase )
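# --- Added usage sketch (not part of the original file) ---
# The enclosing class name is not visible in this excerpt; assuming it is the
# PoolFormer-style processor that transformers exports (a hypothetical name is
# used below), a typical call looks like this:
#
#   from PIL import Image
#   import numpy as np
#   processor = PoolFormerImageProcessor()  # hypothetical name, see note above
#   image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resize + center crop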
| 37
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
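# Note (added): with this lazy structure, a statement such as
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
#
# imports only the configuration module; the torch/TF/flax modeling files are
# loaded on first attribute access, and only when their backend is installed.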
| 37
| 1
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 489
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term contributes ~14 digits
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
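# Illustrative check (added, not part of the original script): with precision=10
# the Decimal context keeps 10 significant digits and the final digit is dropped
# above to absorb rounding, so a stable prefix of pi comes back:
#
#   >>> pi(10)
#   '3.14159265'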
| 338
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 700
|
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_traversal(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
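# --- Added for illustration (not in the original file) ---
# The functions above assume an external singly linked list node exposing
# `.val` and `.next`; a minimal sketch plus a usage example follows. Note that
# is_palindrome() splits (mutates) its input, so each call gets a fresh list.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    for values in ([1, 2, 2, 1], [1, 2, 3]):
        print(
            values,
            is_palindrome(build_list(values)),
            is_palindrome_traversal(build_list(values)),
            is_palindrome_dict(build_list(values)),
        )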
| 561
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
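# Hedged usage sketch (added): the ONNX config exposes the dynamic axes per
# model input. The constructor call below follows the generic OnnxConfig
# signature and is an assumption, not taken from this file.
#
#   config = Data2VecTextConfig()
#   onnx_config = Data2VecTextOnnxConfig(config, task="multiple-choice")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])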
| 256
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
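# Illustrative round-trip (added, not in the original script): keys produced by
# generate_key() satisfy pow(pow(m, e, n), d, n) == m for any message m < n.
#
#   public_key, private_key = generate_key(1024)
#   n, e = public_key
#   _, d = private_key
#   message = 42
#   assert pow(pow(message, e, n), d, n) == message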
| 256
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
lowerCamelCase : List[Any] ={
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 20_18,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 701
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
lowerCamelCase : Tuple =logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] ={
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 237
| 0
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark a function with a single key code so the register can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark a function with several key codes so the register can dispatch to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects key-marked methods into a `key_handler` mapping."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
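# Hedged usage sketch (added): `register` rebuilds a class through the
# KeyHandler metaclass, so methods decorated with `mark`/`mark_multiple` become
# reachable by key code via `handle_input`. Key lookup happens on the ord()
# value of the pressed character:
#
#   @register
#   class Menu:
#       @mark(ord("q"))
#       def quit(cls):
#           return "quit"
#
#   # Menu.handle_input(Menu) reads one character and, on "q", dispatches to quit().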
| 405
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 405
| 1
|
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 716
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
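# Hedged usage note (added): this builder backs the packaged "parquet" loader,
# so a typical call that exercises _split_generators/_generate_tables is:
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})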
| 437
| 0
|
"""simple docstring"""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
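# Sanity note (added): gen_gaussian_kernel is not normalized to sum to 1 (the
# prefactor is 1/(2*pi*sigma) rather than the continuous 1/(2*pi*sigma**2), and
# the grid is discrete), so output intensities are scaled by the kernel sum.
# Quick check:
#
#   kernel = gen_gaussian_kernel(3, sigma=1)
#   assert kernel.shape == (3, 3)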
| 4
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
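# Example output (added): generate_all_combinations(4, 2) enumerates all
# ascending 2-element combinations of 1..4, so the script above prints:
#
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4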
| 381
| 0
|
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 465
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
def _a (self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : str = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase__ : Dict = F"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , )
UpperCAmelCase__ : Union[str, Any] = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCAmelCase__ : List[Any] = F""" {text}"""
UpperCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , )
UpperCAmelCase__ : int = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ) + 1, 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
def _a (self ):
"""simple docstring"""
with self.assertRaises(_lowerCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def _a (self ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def _a (self ):
"""simple docstring"""
pass
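# The offset-mapping assertions above hold for fast tokenizers generally: each token maps
# back to a (start, end) character span of the input string. A quick illustration
# (assumes network access to download a checkpoint; the checkpoint choice is arbitrary):
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
for token_id, (start, end) in zip(enc["input_ids"], enc["offset_mapping"]):
    # Each token's span indexes directly into the original string.
    print(token_id, repr("hello hello"[start:end]))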
| 182
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
SCREAMING_SNAKE_CASE = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
SCREAMING_SNAKE_CASE = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
SCREAMING_SNAKE_CASE = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE = False
@property
def _a (self ):
"""simple docstring"""
return 32
@property
def _a (self ):
"""simple docstring"""
return 32
@property
def _a (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _a (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a (self ):
"""simple docstring"""
return 100
@property
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a (self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCAmelCase__ : List[Any] = MultilingualCLIP(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def _a (self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase__ : List[Any] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def _a (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a (self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = self.dummy_text_encoder
UpperCAmelCase__ : Optional[Any] = self.dummy_tokenizer
UpperCAmelCase__ : Optional[Any] = self.dummy_unet
UpperCAmelCase__ : List[Any] = self.dummy_movq
UpperCAmelCase__ : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_lowerCamelCase , )
UpperCAmelCase__ : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a (self , _lowerCamelCase , _lowerCamelCase=0 ):
"""simple docstring"""
UpperCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
UpperCAmelCase__ : List[Any] = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase__ : Optional[int] = 0
if str(_lowerCamelCase ).startswith("""mps""" ):
UpperCAmelCase__ : Tuple = torch.manual_seed(_lowerCamelCase )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCAmelCase__ : Any = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = """cpu"""
UpperCAmelCase__ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase__ : Optional[Any] = self.pipeline_class(**_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Any = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _a (self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
UpperCAmelCase__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCAmelCase__ : List[str] = np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : int = """a hat"""
UpperCAmelCase__ : List[Any] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
UpperCAmelCase__ : str = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
UpperCAmelCase__ : int = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
UpperCAmelCase__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase__ : Union[str, Any] = pipeline(
_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
UpperCAmelCase__ : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
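# The slow test above follows the standard Kandinsky two-stage handoff: a prior pipeline
# maps text to image embeddings, and the inpaint pipeline consumes them along with the
# image and mask. A condensed sketch of that flow (assumes the same checkpoints, a CUDA
# device, and dummy inputs; not a drop-in replacement for the test):
import numpy as np
import torch
from PIL import Image
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16).to("cuda")
init_image = Image.fromarray(np.zeros((768, 768, 3), dtype=np.uint8))
mask = np.ones((768, 768), dtype=np.float32)  # dummy mask; see the test above for the convention
image_embeds, negative_image_embeds = prior("a hat", negative_prompt="").to_tuple()
image = pipe("a hat", image=init_image, mask_image=mask,
             image_embeds=image_embeds, negative_image_embeds=negative_image_embeds,
             height=768, width=768, output_type="np").images[0]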
| 182
| 1
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( SCREAMING_SNAKE_CASE ,unittest.TestCase):
'''simple docstring'''
UpperCamelCase__ : str = GPTaTokenizer
UpperCamelCase__ : Dict = GPTaTokenizerFast
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : int = {"""add_prefix_space""": True}
UpperCamelCase__ : Optional[Any] = False
def _a ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
a__ = dict(zip(a_ , range(len(a_ ) ) ) )
a__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
a__ = {"""unk_token""": """<unk>"""}
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def _a ( self , **a_ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a_ )
def _a ( self , **a_ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def _a ( self , a_ ):
a__ = """lower newer"""
a__ = """lower newer"""
return input_text, output_text
def _a ( self ):
a__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ = """lower newer"""
a__ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
a__ = tokenizer.tokenize(a_ , add_prefix_space=a_ )
self.assertListEqual(a_ , a_ )
a__ = tokens + [tokenizer.unk_token]
a__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def _a ( self ):
if not self.test_rust_tokenizer:
return
a__ = self.get_tokenizer()
a__ = self.get_rust_tokenizer(add_prefix_space=a_ )
a__ = """lower newer"""
# Testing tokenization
a__ = tokenizer.tokenize(a_ , add_prefix_space=a_ )
a__ = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
a__ = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
a__ = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
a__ = self.get_rust_tokenizer(add_prefix_space=a_ )
a__ = tokenizer.encode(a_ , add_prefix_space=a_ )
a__ = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# Testing the unknown token
a__ = tokens + [rust_tokenizer.unk_token]
a__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def _a ( self , *a_ , **a_ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _a ( self , a_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
# Simple input
a__ = """This is a simple input"""
a__ = ["""This is a simple input 1""", """This is a simple input 2"""]
a__ = ("""This is a simple input""", """This is a pair""")
a__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="""max_length""" )
# Simple input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="""max_length""" )
# Simple input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="""max_length""" , )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="""max_length""" )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="""max_length""" )
# Pair input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="""max_length""" , )
def _a ( self ):
a__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
a__ = """This is a simple input"""
a__ = ["""This is a simple input looooooooong""", """This is a simple input"""]
a__ = ("""This is a simple input""", """This is a pair""")
a__ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
a__ = tokenizer.pad_token_id
a__ = tokenizer(a_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
a__ = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="""np""" )
a__ = tokenizer(*a_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
a__ = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _a ( self ):
a__ = """$$$"""
a__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
a__ = """This is a simple input"""
a__ = ["""This is a simple input 1""", """This is a simple input 2"""]
a__ = tokenizer.bos_token_id
a__ = tokenizer(a_ )
a__ = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0] , a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a__ = tokenizer.decode(out_s.input_ids )
a__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _a ( self ):
pass
def _a ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
a__ = [self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a__ = """Encode this."""
a__ = """This one too please."""
a__ = tokenizer.encode(a_ , add_special_tokens=a_ )
encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ )
a__ = tokenizer.encode_plus(
a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , )
a__ = encoded_sequence_dict["""input_ids"""]
a__ = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(a_ ) , len(a_ ) )
a__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ )
]
a__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(a_ , a_ )
@require_tokenizers
class __snake_case ( unittest.TestCase):
'''simple docstring'''
def _a ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
a__ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a_ )
a__ = """A photo of a cat"""
a__ = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("""test_opt""" )
a__ = AutoTokenizer.from_pretrained("""./test_opt""" )
a__ = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 250, 1_345, 9, 10, 4_758] )
def _a ( self ):
a__ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=a_ )
a__ = """A photo of a cat"""
a__ = tokenizer.encode(
a_ , )
# Same as above
self.assertEqual(a_ , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def _a ( self ):
a__ = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a_ )
a__ = """bos"""
a__ = tokenizer.get_vocab()["""bos"""]
a__ = """A photo of a cat"""
a__ = tokenizer.encode(
a_ , )
# We changed the bos token
self.assertEqual(a_ , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("""./tok""" )
a__ = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
a__ = tokenizer.encode(
a_ , )
self.assertEqual(a_ , [31_957, 250, 1_345, 9, 10, 4_758] )
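# The GPT-2 vocab entries above use the byte-level marker "\u0120" (rendered as Ġ) to
# encode a leading space inside the token itself, which is why add_prefix_space changes
# the tokenization. A quick check (downloads the gpt2 checkpoint; exact splits depend on
# the merges file):
from transformers import GPT2TokenizerFast
tok = GPT2TokenizerFast.from_pretrained("gpt2")
print(tok.tokenize("hello"))    # no marker: word-initial form without a space
print(tok.tokenize(" hello"))   # the leading space is folded in as \u0120hello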
| 704
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False, False, False
@dataclass
class __snake_case :
'''simple docstring'''
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[str] = None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] = "dict"
UpperCamelCase__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()})
UpperCamelCase__ : str = field(default="""Audio""" ,init=SCREAMING_SNAKE_CASE ,repr=SCREAMING_SNAKE_CASE)
def __call__( self ):
return self.pa_type
def _a ( self , a_ ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(a_ , a_ ):
return {"bytes": None, "path": value}
elif isinstance(a_ , a_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
a__ = BytesIO()
sf.write(a_ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
                    # At least, if you want to convert PCM bytes to WAV bytes, you have to know the sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
a__ = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
a__ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
a__ = BytesIO(bytes() )
sf.write(a_ , a_ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _a ( self , a_ , a_ = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
a__ , a__ = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
a__ = xsplitext(a_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
a__ = token_per_repo_id or {}
a__ = path.split("""::""" )[-1]
try:
a__ = string_to_dict(a_ , config.HUB_DATASETS_URL )["""repo_id"""]
a__ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
a__ = None
with xopen(a_ , """rb""" , use_auth_token=a_ ) as f:
a__ , a__ = sf.read(a_ )
else:
a__ , a__ = sf.read(a_ )
a__ = array.T
if self.mono:
a__ = librosa.to_mono(a_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
a__ = librosa.resample(a_ , orig_sr=a_ , target_sr=self.sampling_rate )
a__ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _a ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _a ( self , a_ ):
if pa.types.is_string(storage.type ):
a__ = pa.array([None] * len(a_ ) , type=pa.binary() )
a__ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
a__ = pa.array([None] * len(a_ ) , type=pa.string() )
a__ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
a__ = pa.array([Audio().encode_example(a_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
a__ = storage.field("""bytes""" )
else:
a__ = pa.array([None] * len(a_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
a__ = storage.field("""path""" )
else:
a__ = pa.array([None] * len(a_ ) , type=pa.string() )
a__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(a_ , self.pa_type )
def _a ( self , a_ ):
@no_op_if_value_is_null
def path_to_bytes(a_ ):
with xopen(a_ , """rb""" ) as f:
a__ = f.read()
return bytes_
a__ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
a__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
a__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(a_ , self.pa_type )
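# A round trip through the feature above: encode_example serializes an array to WAV bytes,
# and decode_example reads them back (requires soundfile for encoding and librosa for
# decoding; the sine wave is just a stand-in signal):
import numpy as np
from datasets import Audio
feature = Audio(sampling_rate=16_000)
wave = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
encoded = feature.encode_example({"array": wave, "sampling_rate": 16_000})
assert encoded["bytes"] is not None and encoded["path"] is None
decoded = feature.decode_example(encoded)
assert decoded["sampling_rate"] == 16_000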
| 351
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_SCREAMING_SNAKE_CASE : Dict = pytest.mark.integration
@require_faiss
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple):
        _lowercase: str = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def UpperCAmelCase__ ( self : Union[str, Any]):
import faiss
_lowercase: Any = self._create_dummy_dataset()
_lowercase: Dict = dset.map(
lambda _UpperCamelCase , _UpperCamelCase: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=_UpperCamelCase , keep_in_memory=_UpperCamelCase)
_lowercase: Dict = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT)
_lowercase , _lowercase: Union[str, Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
dset.drop_index("vecs")
def UpperCAmelCase__ ( self : List[Any]):
import faiss
_lowercase: Optional[Any] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowercase , _lowercase: List[Any] = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
def UpperCAmelCase__ ( self : List[str]):
import faiss
_lowercase: Tuple = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCamelCase) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name)
dset.load_faiss_index("vecs2" , tmp_file.name)
os.unlink(tmp_file.name)
_lowercase , _lowercase: List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples["filename"][0] , "my_name-train_29")
def UpperCAmelCase__ ( self : Any):
_lowercase: str = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name="vecs")
dset.drop_index("vecs")
self.assertRaises(_UpperCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa)))
def UpperCAmelCase__ ( self : str):
from elasticsearch import Elasticsearch
_lowercase: Union[str, Any] = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
_lowercase: Any = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
_lowercase: int = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_lowercase: List[str] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=_UpperCamelCase)
_lowercase , _lowercase: Dict = dset.get_nearest_examples("filename" , "my_name-train_29")
self.assertEqual(examples["filename"][0] , "my_name-train_29")
@require_faiss
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any]):
import faiss
_lowercase: Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 10)
# single query
_lowercase: Optional[int] = np.zeros(5 , dtype=np.floataa)
_lowercase: Optional[Any] = 1
_lowercase , _lowercase: Union[str, Any] = index.search(_UpperCamelCase)
self.assertRaises(_UpperCamelCase , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
_lowercase: Tuple = np.eye(5 , dtype=np.floataa)[::-1]
_lowercase , _lowercase: Union[str, Any] = index.search_batch(_UpperCamelCase)
self.assertRaises(_UpperCamelCase , index.search_batch , queries[0])
_lowercase: Optional[int] = [scores[0] for scores in total_scores]
_lowercase: Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , _UpperCamelCase)
def UpperCAmelCase__ ( self : Any):
import faiss
_lowercase: Optional[Any] = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
_lowercase: Tuple = FaissIndex(string_factory="LSH")
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(_UpperCamelCase):
_lowercase: Optional[int] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5))
def UpperCAmelCase__ ( self : Tuple):
import faiss
_lowercase: Dict = faiss.IndexFlat(5)
_lowercase: str = FaissIndex(custom_index=_UpperCamelCase)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def UpperCAmelCase__ ( self : Optional[int]):
import faiss
_lowercase: Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_UpperCamelCase) as tmp_file:
index.save(tmp_file.name)
_lowercase: Tuple = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
_lowercase: Union[str, Any] = np.zeros(5 , dtype=np.floataa)
_lowercase: Union[str, Any] = 1
_lowercase , _lowercase: Optional[Any] = index.search(_UpperCamelCase)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def __lowerCAmelCase ( __magic_name__ ):
import faiss
_lowercase: List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_lowercase: Any = "index.faiss"
_lowercase: Dict = f"mock://{index_name}"
index.save(lowerCAmelCase__ , storage_options=mockfs.storage_options )
_lowercase: Optional[int] = FaissIndex.load(lowerCAmelCase__ , storage_options=mockfs.storage_options )
_lowercase: List[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: List[Any] = 1
_lowercase , _lowercase: Any = index.search(lowerCAmelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A ( lowerCamelCase_ ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any]):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
"elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
_lowercase: Any = Elasticsearch()
_lowercase: Union[str, Any] = {"acknowledged": True}
_lowercase: Tuple = ElasticSearchIndex(es_client=_UpperCamelCase)
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"])
# single query
_lowercase: Tuple = "foo"
_lowercase: Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_lowercase , _lowercase: Dict = index.search(_UpperCamelCase)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
_lowercase: Tuple = "foo"
_lowercase: Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_lowercase , _lowercase: Dict = index.search(_UpperCamelCase , request_timeout=30)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
_lowercase: int = ["foo", "bar", "foobar"]
_lowercase: List[str] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_lowercase , _lowercase: Dict = index.search_batch(_UpperCamelCase)
_lowercase: Optional[int] = [scores[0] for scores in total_scores]
_lowercase: Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([1, 1, 1] , _UpperCamelCase)
# batched queries with timeout
_lowercase: Dict = ["foo", "bar", "foobar"]
_lowercase: Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_lowercase , _lowercase: str = index.search_batch(_UpperCamelCase , request_timeout=30)
_lowercase: Any = [scores[0] for scores in total_scores]
_lowercase: Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCamelCase) , 0)
self.assertListEqual([1, 1, 1] , _UpperCamelCase)
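# Outside the mocked tests, the FaissIndex wrapper exercised above is used directly like
# this (requires faiss to be installed; with METRIC_INNER_PRODUCT, an all-ones query over
# the identity matrix scores 1.0 against every stored basis vector):
import faiss
import numpy as np
from datasets.search import FaissIndex
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
scores, indices = index.search(np.ones(5, dtype=np.float32), k=3)
print(scores, indices)  # three hits, all with inner product 1.0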
| 226
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
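# A minimal save/load round trip of the config tested above, with no Hub access needed:
import tempfile
from transformers import GenerationConfig
config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)
assert reloaded.do_sample is True and reloaded.temperature == 0.7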
| 29
| 0
|
'''simple docstring'''
def solution(n = 4_000_000):
    """simple docstring"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"""{solution() = }""")
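# Sanity check with a small bound: the even Fibonacci numbers not exceeding 100 are
# 2, 8 and 34, so the expected total is 44.
assert solution(100) == 2 + 8 + 34  # 44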
| 709
|
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern , text):
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp():
    """simple docstring"""
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1) and not rabin_karp(pattern , text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text)
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text)
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text)
    print('''Success.''')
if __name__ == "__main__":
test_rabin_karp()
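# The rolling-hash update in the loop above can be verified against hashing each window
# from scratch; the two must agree at every position. A standalone check (the helper
# name is mine):
def direct_hash(s , base=256 , mod=1_000_003):
    h = 0
    for ch in s:
        h = (h * base + ord(ch)) % mod
    return h
text , m = 'abcdef' , 3
base , mod = 256 , 1_000_003
power = pow(base , m - 1 , mod)  # base**(m-1) % mod, the weight of the leading character
h = direct_hash(text[:m])
for i in range(len(text) - m):
    # drop text[i] from the front, append text[i + m] at the back
    h = ((h - ord(text[i]) * power) * base + ord(text[i + m])) % mod
    assert h == direct_hash(text[i + 1 : i + 1 + m])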
| 352
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
lowerCamelCase : Tuple = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
lowerCamelCase : Any = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Any ):
'''simple docstring'''
with open(lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = collections.OrderedDict()
lowerCamelCase_ = collections.OrderedDict()
lowerCamelCase_ = collections.OrderedDict()
with open(lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase_ = f.readlines()
lowerCamelCase_ = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowercase ):
lowerCamelCase_ = b
lowerCamelCase_ = idx
for wd in b:
lowerCamelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : str , A_ : Any , A_ : Any , A_ : Optional[Any]="<|endoftext|>" , A_ : Any="<|endoftext|>" , A_ : Optional[int]="<|startoftext|>" , A_ : Union[str, Any]="<|endoftext|>" , A_ : Any=False , **A_ : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=A_ , pad_token=A_ , bos_token=A_ , eos_token=A_ , do_clean_text=A_ , **A_ , )
if not os.path.isfile(A_ ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(A_ ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
lowerCamelCase_ = do_clean_text
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = load_vocab_and_emoji(A_ , A_ )
lowerCamelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def a__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def a__ ( self : Optional[Any] , A_ : str ) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text )
def a__ ( self : Optional[int] , A_ : Dict ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def a__ ( self : Union[str, Any] , A_ : Union[str, Any] ) -> int:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(A_ )
def a__ ( self : Optional[int] , A_ : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = ''.join(A_ ).strip()
return out_string
def a__ ( self : Optional[Any] , A_ : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] )
if len(A_ ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
return input_ids
def a__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCamelCase_ = 0
if os.path.isdir(A_ ):
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCamelCase_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCamelCase_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
lowerCamelCase_ = token_index
writer.write(','.join(A_ ) + '\n' )
index += 1
with open(A_ , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , A_ )
return vocab_file, emoji_file
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : Any , A_ : Union[str, Any] , A_ : int , A_ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = vocab # same as swe
lowerCamelCase_ = ids_to_tokens # same as bpe
lowerCamelCase_ = emoji
lowerCamelCase_ = np.max([len(A_ ) for w in self.vocab.keys()] )
lowerCamelCase_ = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCamelCase_ = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCamelCase_ = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCamelCase_ = re.compile(
r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCamelCase_ = re.compile(
r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCamelCase_ = re.compile(
r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCamelCase_ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCamelCase_ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCamelCase_ = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : str ) -> Optional[int]:
"""simple docstring"""
return len(self.ids_to_tokens )
def a__ ( self : Union[str, Any] , A_ : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = self.content_repattera.sub('<URL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<EMAIL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<TEL>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<DATE>' , A_ )
lowerCamelCase_ = self.content_repattera.sub('<PRICE>' , A_ )
lowerCamelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCamelCase_ = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
    def tokenize(self, text, clean=False):
        """Greedily match ``text`` against the vocabulary, falling back to byte tokens."""
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        """Convert a single token id back to text, decoding byte-fallback tokens."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
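
# A minimal usage sketch of the tokenizer class above. The vocab, ids_to_tokens
# and emoji structures here are toy stand-ins for illustration, not the real
# released vocabulary files, so treat this as a hedged example only.
if __name__ == "__main__":
    vocab = {"こん": 0, "にちは": 1, "<SP>": 2}
    ids_to_tokens = {0: ["こん"], 1: ["にちは"], 2: ["<SP>"]}
    emoji = {"emoji": {}, "emoji_inv": {}}

    tok = SubWordJapaneseTokenizer(vocab, ids_to_tokens, emoji)
    print(tok.tokenize("こんにちは"))      # -> ["こん", "にちは"]: greedy longest match, smallest id wins
    print(tok.convert_id_to_token(0))      # -> "こん"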
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCAmelCase__  # rebind the long literal above to a readable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
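
# Hedged sketch of how the source/target vocabulary split tested above is used
# in practice. The checkpoint name is a real Helsinki-NLP model, but the exact
# ids it produces are not asserted here.
if __name__ == "__main__":
    from transformers import MarianTokenizer

    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tokenizer(["I am a small frog"], text_target=["Ich bin ein kleiner Frosch"])
    print(batch["input_ids"])  # encoded with the source (English) sentencepiece model
    print(batch["labels"])     # encoded with the target (German) sentencepiece model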
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    batch_params = ["image"]
    params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
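
# Hedged usage sketch for the pipeline exercised above. The checkpoint name is
# the real openai/shap-e-img2img repo; the call mirrors the slow test and needs
# a CUDA device, so treat it as illustrative rather than canonical.
if __name__ == "__main__":
    from diffusers import ShapEImg2ImgPipeline
    from diffusers.utils import load_image

    pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
    )
    frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
    print(frames.shape)  # (20, 64, 64, 3): 20 rendered views of the generated 3-D asset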
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written
        return written
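
# Hedged usage sketch: these classes back Dataset.to_sql / Dataset.from_sql in
# the public `datasets` API, so the round trip can be exercised like this
# (table name and in-memory connection are illustrative).
if __name__ == "__main__":
    import sqlite3

    from datasets import Dataset

    ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
    con = sqlite3.connect(":memory:")
    ds.to_sql("my_table", con)                              # SqlDatasetWriter under the hood
    ds2 = Dataset.from_sql("SELECT * FROM my_table", con)   # SqlDatasetReader under the hood
    print(ds2[0])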
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
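
# Hedged sketch of the spatial-size arithmetic the model tester above relies on:
# each CvT convolutional embedding stage shrinks the feature map by
# floor((size + 2*pad - kernel) / stride) + 1, using the default tester settings.
if __name__ == "__main__":
    size = 64
    for kernel, stride, pad in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
        size = floor((size + 2 * pad - kernel) / stride) + 1
        print(size)  # prints 16, 8, 4: the per-stage feature-map side lengths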
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
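
# Hedged sketch: calling the helper above programmatically instead of running
# `accelerate config default` on the command line. The save path is illustrative;
# `write_basic_config` is also re-exported from `accelerate.utils`.
if __name__ == "__main__":
    path = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_default_config.json")
    print(path)  # the written path, or False if a config already existed there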
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31_414, 2_32, 3_28, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31_414, 2_32, 3_28, 7_40, 11_40, 12_695, 69, 46_078, 15_88, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 2_50, 6, 50_264, 38_23, 4_87, 21_992, 36_45, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 2_50, 6, 50_264, 38_23, 4_87, 21_992, 36_45, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
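
# Hedged sketch of the behaviour the offsets test above pins down: with the
# default trim_offsets=True, the leading "Ġ" space is excluded from each
# token's character span. The printed offsets are expected, not asserted here.
if __name__ == "__main__":
    from transformers import RobertaTokenizerFast

    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    print(enc.offset_mapping)  # e.g. [(0, 5), (6, 11)]: the space before the 2nd token is trimmed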
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset whose total length is not known in advance.
    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
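
# Illustrative sketch (hedged, not part of the original test suite): the resume
# behaviour asserted in test_skip_first_batches, shown standalone. It relies on
# the same `DataLoader` and `skip_first_batches` imports used by the tests above.
_loader = DataLoader(list(range(16)), batch_size=4)
_resumed = skip_first_batches(_loader, num_batches=2)
assert [t.tolist() for t in _resumed] == [[8, 9, 10, 11], [12, 13, 14, 15]]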
| 509
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize a single image, a single video, or a batch of videos into a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
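
# Illustrative sketch (hedged): the three input shapes `make_batched` accepts,
# with e.g. `frame = np.zeros((16, 16, 3), dtype=np.uint8)`:
#   make_batched(frame)               -> [[frame]]           (single image)
#   make_batched([frame, frame])      -> [[frame, frame]]    (single video)
#   make_batched([[frame], [frame]])  -> [[frame], [frame]]  (batch of videos)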
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
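
# Illustrative usage sketch (hedged): preprocessing a dummy 8-frame video with
# the processor above. The shapes are made up for the example; the output
# follows (batch, frames, channels, height, width).
#
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     processor = VideoMAEImageProcessor()
#     batch = processor(video, return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 8, 3, 224, 224)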
| 570
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCAmelCase_ : Tuple = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Instantiate a MaskFormerConfig from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
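
# Illustrative usage sketch (hedged): assembling the config from explicit
# sub-configs via the classmethod above.
#
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )
#     config.hidden_size == config.mask_feature_size  # True, via attribute_map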
| 570
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=snake_case ):
"""simple docstring"""
lowerCAmelCase__ : str = ['torch', 'scipy']
def __init__( self: str , *__lowerCAmelCase: int , **__lowerCAmelCase: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch", "scipy"] )
@classmethod
def _UpperCAmelCase ( cls: Any , *__lowerCAmelCase: str , **__lowerCAmelCase: Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def _UpperCAmelCase ( cls: Dict , *__lowerCAmelCase: Union[str, Any] , **__lowerCAmelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286
| 0
|
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image in place, using the mean pixel value as the threshold."""
    width, height = image.size
    pixels = image.load()

    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 371
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
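
# Illustrative check (hedged): 100 = 2 * 2 * 5 * 5.
assert prime_factors(100) == [2, 2, 5, 5]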
| 371
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : int = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 673
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Element-wise sigmoid linear unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
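
# Illustrative check (hedged): sigmoid(0) = 0.5, hence swish(0) = 0.
assert float(sigmoid(np.array([0.0]))[0]) == 0.5
assert float(swish(np.array([0.0]))[0]) == 0.0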
| 673
| 1
|
'''simple docstring'''
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather for a location via the OpenWeatherMap API."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the 5-day forecast for a location via the OpenWeatherMap API."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch the one-call weather data for given coordinates via the OpenWeatherMap API."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 446
|
'''simple docstring'''
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in the doc's custom.js file."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'  "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 446
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
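
# Illustrative usage sketch (hedged): one call producing audio features plus
# tokenized labels. The checkpoint name is an assumption for the example and
# would be downloaded on first use.
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16000, text="hello world")
#     inputs["input_features"], inputs["labels"]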
| 258
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 258
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
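
# Illustrative sketch (hedged): the attribute_map aliases exposed above.
#
#     config = TableTransformerConfig()
#     config.num_attention_heads  # -> config.encoder_attention_heads (8)
#     config.hidden_size          # -> config.d_model (256)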
| 311
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Register an unavailable Formatter under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates a Formatter from its type name (or alias) and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
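
# Illustrative usage sketch (hedged): resolving formatters through the alias
# table registered above.
#
#     get_formatter("np")     # alias -> NumpyFormatter()
#     get_formatter(None)     # -> PythonFormatter()
#     get_formatter("torch")  # TorchFormatter(), or raises if torch is missing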
| 311
| 1
|
def average_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
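
# Illustrative check (hedged): [1, 2, 3, 4] has mean 2.5 and mean absolute
# deviation (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
assert average_absolute_deviation([1, 2, 3, 4]) == 1.0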
| 714
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether string1 covers string2, i.e. they differ exactly in the '_' positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # Mark implicants that are the only cover for some minterm (essential).
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and clear the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime implicant coverage chart."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 208
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 6
|
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Check whether the exponent computed from the partition formula is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the smallest k whose proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"{solution() = }")
| 578
| 0
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Quicksort with a randomly chosen pivot; sorts a[left:right] in place."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)
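
# Illustrative, non-interactive check (hedged) of the sorter above, bypassing
# the stdin prompt in main():
_demo = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort_random(_demo, 0, len(_demo))
assert _demo == [1, 1, 2, 3, 4, 5, 6, 9]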
if __name__ == "__main__":
main()
| 719
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102
| 0
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the current minimum and maximum of the collection."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
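
# Illustrative check (hedged): min/max extraction yields a fully sorted list.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]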
| 546
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class UpperCAmelCase__ :
snake_case_ = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
snake_case_ = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
snake_case_ = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Benchmark training of model'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Verbose memory tracing'''} )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Trace memory line by line'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save result to a CSV file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save all print statements in a log file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Whether to print environment information'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
snake_case_ = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
snake_case_ = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
snake_case_ = field(
default=F'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
snake_case_ = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
snake_case_ = field(
default=F'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
snake_case_ = field(
default=F'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
snake_case_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case_ ( self ):
    """Warns that the Hugging Face benchmarking utilities are deprecated."""
    warnings.warn(
        f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
        " are deprecated in general and it is advised to use external Benchmarking libraries "
        " to benchmark Transformer models.",
        FutureWarning,
    )
def snake_case_ ( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def snake_case_ ( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
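# Hedged usage sketch (not part of this class): dataclass argument containers
# like the one above are typically consumed through transformers.HfArgumentParser,
# which turns every field into a CLI flag. The concrete dataclass name below is
# an assumption chosen for illustration.
# from transformers import HfArgumentParser, PyTorchBenchmarkArguments
# parser = HfArgumentParser(PyTorchBenchmarkArguments)
# benchmark_args = parser.parse_args_into_dataclasses()[0]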
| 137
| 0
|
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i, end=''',''')
    # Consider the rest of the activities
    for j in range(1, n):
        # If this activity has a start time greater than or equal to the finish
        # time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ : Tuple = [1, 3, 0, 5, 8, 5]
lowerCAmelCase_ : Any = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
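# Hedged helper sketch: the greedy selection above assumes the activities are
# pre-sorted by finish time. The helper name below is ours, not part of the
# original file; it sorts (start, finish) pairs first and then delegates.
def print_max_activities_unsorted(start: list[int], finish: list[int]) -> None:
    pairs = sorted(zip(start, finish), key=lambda pair: pair[1])
    print_max_activities([p[0] for p in pairs], [p[1] for p in pairs])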
| 706
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=32 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Dict=10 , lowerCAmelCase__ : List[Any]=[10, 20, 30, 40] , lowerCAmelCase__ : List[Any]=[1, 1, 2, 1] , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]="relu" , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : str = embeddings_size
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase__ )
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowercase ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFRegNetModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFRegNetForImageClassification(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : List[Any] ):
    """Builds the (config, inputs_dict) pair used by the common tests."""
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'''pixel_values''': pixel_values}
    return config, inputs_dict
@require_tf
class lowerCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
_lowerCAmelCase : str = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_lowerCAmelCase : str = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : str = False
_lowerCAmelCase : str = False
def __lowercase ( self : Union[str, Any] ):
    """Instantiates the model tester and the config tester."""
    self.model_tester = TFRegNetModelTester(self)
    self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def __lowercase ( self : List[str] ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __lowercase ( self : int ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
def __lowercase ( self : int ):
    """Checks that `pixel_values` is the first argument of the model call."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.call)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''pixel_values''']
        self.assertListEqual(arg_names[:1], expected_arg_names)
def __lowercase ( self : Any ):
    """Runs the base-model shape check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def __lowercase ( self : Dict ):
    """Checks count and spatial shape of the hidden states for both layer types."""
    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
        hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
        expected_num_stages = self.model_tester.num_stages
        self.assertEqual(len(hidden_states), expected_num_stages + 1)
        # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
        )
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    layers_type = ['''basic''', '''bottleneck''']
    for model_class in self.all_model_classes:
        for layer_type in layers_type:
            config.layer_type = layer_type
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def __lowercase ( self : Union[str, Any] ):
    """Checks that tuple and dict model outputs are equivalent."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
        tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
        dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    all(tf.equal(tuple_object, dict_object)),
                    msg=(
                        '''Tuple and dict output are not equal. Difference:'''
                        F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"""
                    ),
                )
        recursive_check(tuple_output, dict_output)
    for model_class in self.all_model_classes:
        model = model_class(config)
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class)
        check_equivalence(model, tuple_inputs, dict_inputs)
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
        check_equivalence(model, tuple_inputs, dict_inputs)
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class)
        check_equivalence(model, tuple_inputs, dict_inputs, {'''output_hidden_states''': True})
        tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
        dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
        check_equivalence(model, tuple_inputs, dict_inputs, {'''output_hidden_states''': True})
def __lowercase ( self : List[Any] ):
    """Runs the image-classification head check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def __lowercase ( self : Any ):
    """Smoke-tests that a pretrained checkpoint can be loaded."""
    for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFRegNetModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def UpperCAmelCase ( ):
SCREAMING_SNAKE_CASE : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : List[str] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : str ):
    """End-to-end check of the logits against reference values."""
    model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors='''tf''' )
    # forward pass
    outputs = model(**inputs , training=False )
    # verify the logits
    expected_shape = tf.TensorShape((1, 1000) )
    self.assertEqual(outputs.logits.shape , expected_shape )
    expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
    tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
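# A hedged end-to-end sketch of the same inference flow outside the test
# harness. The checkpoint name is an assumption shown for illustration only.
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# inputs = processor(images=prepare_img(), return_tensors="tf")
# predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])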
| 464
| 0
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the file at `file_path` and returns its bytes as one bit string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompresses `data_bits` using the Lempel-Ziv-Welch algorithm."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        # Whenever the index reaches a power of two, every existing key gains a
        # leading zero so code lengths stay in sync with the compressor
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> None:
"""simple docstring"""
snake_case: List[str] =8
try:
with open(__snake_case , 'wb' ) as opened_file:
snake_case: int =[
to_write[i : i + byte_length]
for i in range(0 , len(__snake_case ) , __snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__snake_case , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
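# Illustrative sketch (the helper name is ours, not used above) of the
# byte-padding scheme that write_file_binary applies: the bit string is chunked
# into 8-bit groups and the last group is padded with a single "1" followed by
# zeros, which is exactly the prefix that remove_prefix strips on decompression.
def pad_to_bytes(bits: str, byte_length: int = 8) -> list[str]:
    chunks = [bits[i : i + byte_length] for i in range(0, len(bits), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append('10000000')
    else:
        chunks[-1] += '1' + '0' * (byte_length - len(chunks[-1]) - 1)
    return chunks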
def a_ ( __UpperCAmelCase ) -> str:
"""simple docstring"""
snake_case: Dict =0
for letter in data_bits:
if letter == "1":
break
counter += 1
snake_case: Optional[int] =data_bits[counter:]
snake_case: Union[str, Any] =data_bits[counter + 1 :]
return data_bits
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> None:
"""simple docstring"""
snake_case: Optional[Any] =read_file_binary(__snake_case )
snake_case: List[Any] =remove_prefix(__snake_case )
snake_case: str =decompress_data(__snake_case )
write_file_binary(__snake_case , __snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
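# Hedged usage note: with the _LazyModule indirection above, importing the
# package is cheap; the torch-backed classes are only resolved on first
# attribute access, e.g. (illustrative):
# from transformers.models.table_transformer import TableTransformerConfig
# config = TableTransformerConfig()  # triggers the deferred import once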
| 317
| 0
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5) -> list:
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
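# A hedged alternative using the high-level pipeline API, equivalent in spirit
# to fill_mask above (same checkpoint as loaded earlier):
# from transformers import pipeline
# camembert_fill_mask = pipeline("fill-mask", model="camembert-base", tokenizer="camembert-base")
# print(camembert_fill_mask("Le camembert est <mask> :)"))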
| 129
|
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: prints a topological ordering, or reports a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
__lowerCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
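# Quick illustrative check: introducing a back edge makes the graph cyclic, so
# Kahn's algorithm above would print "Cycle exists" instead of an ordering.
# cyclic_graph = {0: [1], 1: [2], 2: [0]}
# topological_sort(cyclic_graph)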
| 129
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = "maskformer-swin"
UpperCAmelCase : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , A_=224 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.0_2 , A_=1e-5 , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(lowercase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase, lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
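# Hedged usage sketch: the upstream name of the anonymized class above is
# MaskFormerSwinConfig (inferable from model_type = "maskformer-swin").
# Instantiating it with defaults exposes the derived attributes computed in
# __init__ (values follow from the defaults embed_dim=96, depths of length 4):
# config = MaskFormerSwinConfig()
# config.hidden_size   # 96 * 2 ** 3 == 768, channel dim after the last stage
# config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]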
| 433
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
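# Example invocation (illustrative; the script filename is an assumption) using
# the flags defined in the parser above:
# python convert_levit_checkpoint.py --model_name levit-128S \
#     --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub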
| 20
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 83
|
def nor_gate(input_1: int, input_2: int) -> int:
    """Returns 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)
def main() -> None:
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(f"""| 0 | 0 | {nor_gate(0, 0)} |""")
    print(f"""| 0 | 1 | {nor_gate(0, 1)} |""")
    print(f"""| 1 | 0 | {nor_gate(1, 0)} |""")
    print(f"""| 1 | 1 | {nor_gate(1, 1)} |""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
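# Equivalent formulation (illustrative): NOR is the negation of OR, so the gate
# above can also be written as `int(not (input_1 or input_2))`.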
| 83
| 1
|
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz, starting at `number` and counting up to `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
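# Example (illustrative): fizz_buzz(1, 7) returns
# "1 2 Fizz 4 Buzz Fizz 7 "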
| 14
|
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Returns every index in `s` where `pattern` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
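# Illustrative worst case for the naive scan: highly repetitive text forces
# O((n - m + 1) * m) character comparisons for text length n and pattern length m.
# naive_pattern_search("A" * 20, "A" * 5 + "B")  # -> [] after many comparisons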
| 69
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class a_ :
def __init__( self : Union[str, Any] , snake_case__ : int ):
lowerCAmelCase__ = num_of_nodes
lowerCAmelCase__ = []
lowerCAmelCase__ = {}
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
self.m_edges.append([u_node, v_node, weight] )
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase__ = self.find_component(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : list[int] , snake_case__ : int , snake_case__ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase__ = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case__ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase__ = self.find_component(snake_case__ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase__ = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = edge
lowerCAmelCase__ = self.m_component[u]
lowerCAmelCase__ = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase__ = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = edge
lowerCAmelCase__ = self.m_component[u]
lowerCAmelCase__ = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case__ , snake_case__ , snake_case__ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
lowerCAmelCase__ = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
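# Hedged usage sketch for the anonymized graph class above. The upstream names
# (Graph, add_edge, boruvka) are assumptions for the obfuscated identifiers;
# the edge list is illustrative.
# g = Graph(5)
# for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#     g.add_edge(u, v, w)
# g.boruvka()  # prints each edge added to the MST and the total weight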
| 710
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674
| 0
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_lowerCAmelCase : List[str] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def SCREAMING_SNAKE_CASE__ ( snake_case : int=None )-> int:
'''simple docstring'''
if subparsers is not None:
UpperCAmelCase__ : Optional[int] = subparsers.add_parser("tpu-config" , description=_description )
else:
UpperCAmelCase__ : int = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
UpperCAmelCase__ : Union[str, Any] = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=snake_case , default=snake_case , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=snake_case , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=snake_case , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
UpperCAmelCase__ : Tuple = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=snake_case , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=snake_case )
return parser
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(snake_case ):
UpperCAmelCase__ : int = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase__ : Any = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase__ : List[Any] = defaults.commands
if not args.tpu_name:
UpperCAmelCase__ : Optional[Any] = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase__ : List[str] = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase__ : Any = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
UpperCAmelCase__ : Optional[int] = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , snake_case ):
UpperCAmelCase__ : int = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
UpperCAmelCase__ : Tuple = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , snake_case ):
UpperCAmelCase__ : List[Any] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase__ : Dict = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCAmelCase__ : int = "; ".join(snake_case )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase__ : str = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(snake_case )}' )
return
subprocess.run(snake_case )
print("Successfully setup pod." )
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = tpu_command_parser()
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
tpu_command_launcher(snake_case )
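# Illustrative invocation of the command assembled above; the flag names are
# the ones defined in the parser, while the TPU name and zone are placeholders:
# accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#     --command "echo hello" --install_accelerate --debug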
| 438
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = data
UpperCAmelCase__ : Node | None = None
class lowerCAmelCase__ :
def __init__( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[int] = None
def __iter__( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.head
while self.head:
yield node.data
UpperCAmelCase__ : Optional[Any] = node.next
if node == self.head:
break
def __len__( self : Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ):
'''simple docstring'''
return "->".join(str(snake_case__ ) for item in iter(self ) )
def __a ( self : Optional[Any] , snake_case__ : Any ):
'''simple docstring'''
self.insert_nth(len(self ) , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Any ):
'''simple docstring'''
self.insert_nth(0 , snake_case__ )
def __a ( self : Any , snake_case__ : int , snake_case__ : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
UpperCAmelCase__ : Any = Node(snake_case__ )
if self.head is None:
UpperCAmelCase__ : Union[str, Any] = new_node # first node points itself
UpperCAmelCase__ : List[str] = new_node
elif index == 0: # insert at head
UpperCAmelCase__ : Optional[Any] = self.head
UpperCAmelCase__ : List[Any] = new_node
else:
UpperCAmelCase__ : Union[str, Any] = self.head
for _ in range(index - 1 ):
UpperCAmelCase__ : Dict = temp.next
UpperCAmelCase__ : Tuple = temp.next
UpperCAmelCase__ : str = new_node
if index == len(self ) - 1: # insert at tail
UpperCAmelCase__ : Union[str, Any] = new_node
def __a ( self : List[Any] ):
'''simple docstring'''
return self.delete_nth(0 )
def __a ( self : Optional[Any] ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __a ( self : Tuple , snake_case__ : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
UpperCAmelCase__ : Union[str, Any] = self.head
if self.head == self.tail: # just one node
UpperCAmelCase__ : str = None
elif index == 0: # delete head node
UpperCAmelCase__ : List[Any] = self.tail.next.next
UpperCAmelCase__ : Dict = self.head.next
else:
UpperCAmelCase__ : List[Any] = self.head
for _ in range(index - 1 ):
UpperCAmelCase__ : Tuple = temp.next
UpperCAmelCase__ : Dict = temp.next
UpperCAmelCase__ : Dict = temp.next.next
if index == len(self ) - 1: # delete at tail
UpperCAmelCase__ : int = temp
return delete_node.data
def __a ( self : Any ):
'''simple docstring'''
return len(self ) == 0
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : str = CircularLinkedList()
assert len(snake_case ) == 0
assert circular_linked_list.is_empty() is True
assert str(snake_case ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(snake_case ) == i
circular_linked_list.insert_nth(snake_case , i + 1 )
assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 438
| 1
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Returns indices [i, j] with nums[i] + nums[j] == target; assumes `nums` is sorted."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
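# Note (illustrative): the two-pointer scan requires `nums` sorted in ascending
# order; unsorted input should be sorted first (tracking original indices).
# print(two_pointer([1, 3, 7, 9], 10))  # -> [0, 3]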
| 248
|
"""simple docstring"""
import base64
def base32_encode(string: str) -> bytes:
    """Encodes UTF-8 text with one of the stdlib base64-module codecs."""
    return base64.b32encode(string.encode("utf-8"))
def base32_decode(encoded_bytes: bytes) -> str:
    """Decodes the encoded bytes back to UTF-8 text."""
    return base64.b32decode(encoded_bytes).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base32_encode(test)
    print(encoded)
    decoded = base32_decode(encoded)
    print(decoded)
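# Hedged note: the same call shape works for the other codecs in the stdlib
# base64 module, e.g. base64.b64encode / base64.b64decode or
# base64.b85encode / base64.b85decode, swapped in one-for-one above.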
| 248
| 1
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort; `length` shrinks by one per pass."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
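# Example (illustrative):
# print(bubble_sort([4, 2, 5, 1, 3]))  # -> [1, 2, 3, 4, 5]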
| 77
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_features", "is_longer"]
def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Union[str, Any] = top_db
__UpperCAmelCase : Optional[Any] = truncation
__UpperCAmelCase : str = padding
__UpperCAmelCase : int = fft_window_size
__UpperCAmelCase : str = (fft_window_size >> 1) + 1
__UpperCAmelCase : List[Any] = hop_length
__UpperCAmelCase : Optional[Any] = max_length_s
__UpperCAmelCase : Tuple = max_length_s * sampling_rate
__UpperCAmelCase : str = sampling_rate
__UpperCAmelCase : int = frequency_min
__UpperCAmelCase : Optional[Any] = frequency_max
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , )
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Dict = copy.deepcopy(self.__dict__)
__UpperCAmelCase : str = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None):
"""simple docstring"""
__UpperCAmelCase : List[Any] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , )
return log_mel_spectrogram.T
def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : str = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Dict = [0]
# randomly choose index for each part
__UpperCAmelCase : Dict = np.random.choice(ranges[0])
__UpperCAmelCase : List[str] = np.random.choice(ranges[1])
__UpperCAmelCase : List[Any] = np.random.choice(ranges[2])
__UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :])
__UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy()
__UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : List[str] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length
__UpperCAmelCase : int = np.random.randint(0 , overflow + 1)
__UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length]
__UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
__UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCAmelCase : Tuple = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0)
__UpperCAmelCase : Any = False
else:
__UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
__UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0)
if truncation == "fusion":
__UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
__UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ):
"""simple docstring"""
__UpperCAmelCase : int = truncation if truncation is not None else self.truncation
__UpperCAmelCase : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
__UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__UpperCAmelCase : str = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray):
__UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa)
elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__UpperCAmelCase : int = [np.asarray(UpperCamelCase_)]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : Optional[int] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_)
for waveform in raw_speech
]
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : List[Any] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_)
is_longer.append(UpperCamelCase_)
if truncation == "fusion" and sum(UpperCamelCase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_))
__UpperCAmelCase : Optional[int] = True
if isinstance(input_mel[0] , UpperCamelCase_):
__UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
__UpperCAmelCase : List[str] = [[longer] for longer in is_longer]
__UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer}
__UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_)
if return_tensors is not None:
__UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_)
return input_features
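# Hedged usage sketch: the upstream name of the anonymized feature extractor
# above is ClapFeatureExtractor; the checkpoint name is an assumption.
# from transformers import ClapFeatureExtractor
# feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
# features = feature_extractor(raw_audio, sampling_rate=48000, return_tensors="pt")
# features["input_features"]  # stacked mel features; 4 channels under "fusion"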
| 77
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
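# Minimal sanity check (added): exercising prepare_blenderbot_inputs_dict with a
# tiny config. The id values below are arbitrary illustrative assumptions.
def _demo_prepare_inputs_dict():
    cfg = BlenderbotConfig(
        vocab_size=50, pad_token_id=1, encoder_layers=2, decoder_layers=2,
        encoder_attention_heads=2, decoder_attention_heads=2,
    )
    input_ids = np.array([[5, 6, 7, 1]])  # final token is padding
    decoder_input_ids = np.array([[2, 5, 6, 1]])
    inputs = prepare_blenderbot_inputs_dict(cfg, input_ids, decoder_input_ids)
    assert inputs["attention_mask"].tolist() == [[1, 1, 1, 0]]  # 1 where not padding
    return inputs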
class FlaxBlenderbotModelTester:
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=16 , A_=2 , A_=4 , A_=4 , A_="gelu" , A_=0.1 , A_=0.1 , A_=32 , A_=2 , A_=1 , A_=0 , A_=0.02 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = initializer_range
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        _SCREAMING_SNAKE_CASE = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
_SCREAMING_SNAKE_CASE = shift_tokens_right(A_ , 1 , 2 )
_SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=A_ , )
_SCREAMING_SNAKE_CASE = prepare_blenderbot_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def A ( self , A_ , A_ , A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 20
_SCREAMING_SNAKE_CASE = model_class_name(A_ )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict['''input_ids'''] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , A_ , A_ )
_SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , A_ , decoder_attention_mask=A_ , past_key_values=A_ , decoder_position_ids=A_ , )
_SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , A_ , decoder_attention_mask=A_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A_ , )
_SCREAMING_SNAKE_CASE = model.decode(A_ , A_ )
_SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def A ( self , A_ , A_ , A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 20
_SCREAMING_SNAKE_CASE = model_class_name(A_ )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict['''input_ids'''] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , A_ , A_ )
_SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , A_ , decoder_attention_mask=A_ , past_key_values=A_ , decoder_position_ids=A_ , )
_SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , A_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A_ , decoder_position_ids=A_ , )
_SCREAMING_SNAKE_CASE = model.decode(A_ , A_ , decoder_attention_mask=A_ )
_SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class __snake_case( unittest.TestCase ):
_A = 99
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
_SCREAMING_SNAKE_CASE = input_ids.shape[0]
_SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._get_config_and_data()
_SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration(A_ )
_SCREAMING_SNAKE_CASE = lm_model(input_ids=A_ )
_SCREAMING_SNAKE_CASE = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration(A_ )
        _SCREAMING_SNAKE_CASE = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        _SCREAMING_SNAKE_CASE = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
_SCREAMING_SNAKE_CASE = lm_model(input_ids=A_ , decoder_input_ids=A_ )
_SCREAMING_SNAKE_CASE = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , A_ )
def A ( self ):
'''simple docstring'''
        _SCREAMING_SNAKE_CASE = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        _SCREAMING_SNAKE_CASE = shift_tokens_right(A_ , 1 , 2 )
        _SCREAMING_SNAKE_CASE = np.equal(A_ , 1 ).astype(np.float32 ).sum()
        _SCREAMING_SNAKE_CASE = np.equal(A_ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(A_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __snake_case( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
_A = True
_A = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_A = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxBlenderbotModelTester(self )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A_ , A_ , A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A_ , A_ , A_ )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE = self._prepare_for_class(A_ , A_ )
_SCREAMING_SNAKE_CASE = model_class(A_ )
@jax.jit
def encode_jitted(A_ , A_=None , **A_ ):
return model.encode(input_ids=A_ , attention_mask=A_ )
with self.subTest('''JIT Enabled''' ):
_SCREAMING_SNAKE_CASE = encode_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE = encode_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE = model_class(A_ )
_SCREAMING_SNAKE_CASE = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_SCREAMING_SNAKE_CASE = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(A_ , A_ , A_ ):
return model.decode(
decoder_input_ids=A_ , decoder_attention_mask=A_ , encoder_outputs=A_ , )
with self.subTest('''JIT Enabled''' ):
_SCREAMING_SNAKE_CASE = decode_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE = decode_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_SCREAMING_SNAKE_CASE = np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE = model(A_ )
self.assertIsNotNone(A_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
_SCREAMING_SNAKE_CASE = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
_SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=A_ )
_SCREAMING_SNAKE_CASE = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
_SCREAMING_SNAKE_CASE = ['''Sam''']
_SCREAMING_SNAKE_CASE = tokenizer(A_ , return_tensors='''jax''' )
_SCREAMING_SNAKE_CASE = model.generate(**A_ , **A_ )
_SCREAMING_SNAKE_CASE = '''Sam is a great name. It means "sun" in Gaelic.'''
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(A_ , **A_ )
assert generated_txt[0].strip() == tgt_text
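# Worked example (added) of shift_tokens_right as exercised in the tests above:
# with pad_token_id=1 and decoder_start_token_id=2, the start token is prepended
# and the final position is dropped. This restates the assumed behavior as a
# self-check, not the library's own documentation.
def _demo_shift_tokens_right():
    ids = np.array([[71, 82, 2]])
    shifted = shift_tokens_right(ids, 1, 2)
    assert np.asarray(shifted).tolist() == [[2, 71, 82]]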
| 168
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertFalse(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) )
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_SCREAMING_SNAKE_CASE = '''fp16'''
self.assertFalse(is_safetensors_compatible(A_ , variant=A_ ) )
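# Simplified restatement (added) of the rule the tests above encode. This is a
# sketch, NOT the actual diffusers implementation: a file list counts as
# compatible when every PyTorch weight file (for the requested variant) has a
# safetensors counterpart under the same name.
def _sketch_is_safetensors_compatible(filenames, variant=None):
    pt_suffix = f".{variant}.bin" if variant else ".bin"
    sf_suffix = f".{variant}.safetensors" if variant else ".safetensors"
    pt_files = [f for f in filenames if f.endswith(pt_suffix)]
    return all(f[: -len(pt_suffix)] + sf_suffix in filenames for f in pt_files)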
| 168
| 1
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.int64)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.int64)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for _ in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
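# Added helper sketch: the loop above reports only the mean latency; recording
# per-iteration timings also exposes tail percentiles. It reuses the session,
# feeds and run options defined above; the iteration count is an assumption.
def report_percentiles(n_iters=200):
    feeds = {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    }
    latencies = []
    for _ in range(n_iters):
        t0 = time.time()
        sess.run(None, feeds, run_options=run_opt)
        latencies.append((time.time() - t0) * 1000)
    print("p50 = {:.3f} ms, p99 = {:.3f} ms".format(
        np.percentile(latencies, 50), np.percentile(latencies, 99)))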
| 83
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( PretrainedConfig ):
__lowerCamelCase : Union[str, Any] = "blenderbot-small"
__lowerCamelCase : Optional[Any] = ["past_key_values"]
__lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class lowerCAmelCase_ ( OnnxSeqaSeqConfigWithPast ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
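# Shape bookkeeping from the dummy past_key_values above, restated numerically
# (added): with batch=2, 16 heads and d_model=512, an encoder length of 8 gives
# per-layer key/value tensors of (batch, heads, seq, head_dim) = (2, 16, 8, 32),
# since head_dim = d_model // heads. The concrete sizes are illustrative.
_batch, _heads, _seq, _d_model = 2, 16, 8, 512
assert (_batch, _heads, _seq, _d_model // _heads) == (2, 16, 8, 32)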
| 18
| 0
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 1_0**-1_0 ) -> float:
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func) ) / Decimal(eval(str(diff(func) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func) ) < precision: # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find value of e (the root of log(x) - 1 = 0)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
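    # What sympy.diff contributes above (added): the derivative is produced
    # symbolically from the same expression string that eval consumes.
    assert str(diff("x**2 - 5*x + 2")) == "2*x - 5"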
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( PreTrainedTokenizer ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
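# The fairseq/sentencepiece alignment above, in numbers (added): sentencepiece
# places "<unk>" at 0 and the first real piece "," at 3, while fairseq reserves
# ids 0-3 for <s>/<pad>/</s>/<unk>; a fixed offset of 1 therefore shifts every
# sentencepiece id into the fairseq id space.
_fairseq_offset = 1
_spm_id_for_comma = 3  # see the alignment table in __init__ above
assert _spm_id_for_comma + _fairseq_offset == 4  # ","'s id in the fairseq vocab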
| 648
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
A__ = os.path.join(args.tf_model_dir , '''parameters.json''' )
A__ = json.loads(open(a_ ).read() )
if not params:
raise ValueError(
f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith('''.pt''' ):
A__ = args.output + '''.pt'''
A__ = OrderedDict()
with tf.device('''/CPU:0''' ):
A__ = tf.train.load_checkpoint(args.tf_model_dir )
A__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
A__ = reader.get_tensor(a_ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
A__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
A__ = 8
A__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/moe''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.endswith('''/softmlp/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
A__ = key_name[-9:-7]
for i in range(1_6 ):
A__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
A__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/mlp''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.endswith('''/p1/bias''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif key_name.endswith('''/p2/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.endswith('''/p2/bias''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/ln''' ):
A__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif key_name.endswith('''/g''' ):
A__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/att''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
A__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A__ = state[:, 0, :, :]
A__ = state[:, 1, :, :]
A__ = state[:, 2, :, :]
A__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
A__ = torch.tensor(a_ )
A__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
A__ = torch.tensor(a_ )
A__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
A__ = torch.tensor(a_ )
elif key_name.endswith('''/o/kernel''' ):
A__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
A__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/an''' ):
A__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ = '''model.blocks.%d.self_attn.norm.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif key_name.endswith('''/g''' ):
A__ = '''model.blocks.%d.self_attn.norm.weight''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
A__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
A__ = '''model.%s.weight''' % nlayer
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(a_ )
if key_name.startswith('''model/wte''' ):
A__ = '''lm_head.weight'''
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(a_ )
elif key_name.startswith('''model/wob''' ):
A__ = '''final_logits_bias'''
A__ = vnp.copy() # same in embedded
A__ = state.reshape((1, -1) )
A__ = torch.tensor(a_ )
elif key_name == "model/dense/kernel":
A__ = '''model.last_project.weight'''
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(a_ )
elif key_name == "model/dense_1/bias":
A__ = '''model.last_project.bias'''
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(a_ )
torch.save(a_ , args.output )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
_lowercase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
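# Example invocation (added; the paths are placeholders):
#   python this_script.py --tf_model_dir /path/to/tf_ckpt --output gptsan.pt
# The recurring "Mesh-TensorFlow is a diagonal matrix" transposes above reduce to
# one convention, isolated here as a sketch: TF dense kernels are stored
# (in, out) while torch.nn.Linear.weight is (out, in), hence the [1, 0] swap.
def _tf_kernel_to_torch_weight(vnp):
    return torch.tensor(np.asarray(vnp).transpose([1, 0]).copy())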
| 632
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ = logging.getLogger()
def _dump_articles( path : Path , articles : list ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
UpperCAmelCase_ = "patrickvonplaten/t5-tiny-random"
UpperCAmelCase_ = "sshleifer/bart-tiny-random"
UpperCAmelCase_ = "sshleifer/tiny-mbart"
UpperCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __lowercase ( TestCasePlus ):
    def run_eval_tester( self , UpperCamelCase ) -> Dict:
__a = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(UpperCamelCase , UpperCamelCase )
__a = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
__a = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()
with patch.object(UpperCamelCase , 'argv' , UpperCamelCase ):
run_generate()
assert Path(UpperCamelCase ).exists()
# os.remove(Path(output_file_name))
    def UpperCamelCase__ ( self ) -> int:
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def UpperCamelCase__ ( self , UpperCamelCase ) -> Union[str, Any]:
self.run_eval_tester(UpperCamelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def UpperCamelCase__ ( self , UpperCamelCase ) -> Optional[Any]:
__a = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__a = Path(self.get_auto_remove_tmp_dir() )
__a = str(tmp_dir / 'scores.json' )
__a = str(tmp_dir / 'val.target' )
_dump_articles(UpperCamelCase , text['en'] )
_dump_articles(UpperCamelCase , text['de'] )
__a = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a = f"\n run_eval_search.py\n {model}\n {str(UpperCamelCase )}\n {str(UpperCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(UpperCamelCase , 'argv' , UpperCamelCase ):
with CaptureStdout() as cs:
run_search()
__a = [' num_beams | length_penalty', model, 'Best score args']
__a = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(UpperCamelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(UpperCamelCase ).exists()
os.remove(Path(UpperCamelCase ) )
| 539
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class snake_case ( PretrainedConfig ):
a_ : Dict = """layoutlmv3"""
def __init__( self , __UpperCAmelCase=5_02_65 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , __UpperCAmelCase=1_28 , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=1_28 , __UpperCAmelCase=64 , __UpperCAmelCase=2_56 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2_24 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Optional[int]:
super().__init__(
vocab_size=__UpperCAmelCase , hidden_size=__UpperCAmelCase , num_hidden_layers=__UpperCAmelCase , num_attention_heads=__UpperCAmelCase , intermediate_size=__UpperCAmelCase , hidden_act=__UpperCAmelCase , hidden_dropout_prob=__UpperCAmelCase , attention_probs_dropout_prob=__UpperCAmelCase , max_position_embeddings=__UpperCAmelCase , type_vocab_size=__UpperCAmelCase , initializer_range=__UpperCAmelCase , layer_norm_eps=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
a_ = max_ad_position_embeddings
a_ = coordinate_size
a_ = shape_size
a_ = has_relative_attention_bias
a_ = rel_pos_bins
a_ = max_rel_pos
a_ = has_spatial_attention_bias
a_ = rel_ad_pos_bins
a_ = max_rel_ad_pos
a_ = text_embed
a_ = visual_embed
a_ = input_size
a_ = num_channels
a_ = patch_size
a_ = classifier_dropout
class snake_case ( OnnxConfig ):
a_ : Any = version.parse("""1.12""" )
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
])
@property
def UpperCAmelCase__ ( self) ->float:
return 1E-5
@property
def UpperCAmelCase__ ( self) ->int:
return 12
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 3 , __UpperCAmelCase = 40 , __UpperCAmelCase = 40 , ) ->Mapping[str, Any]:
setattr(processor.image_processor , "apply_ocr" , __UpperCAmelCase)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ = processor.tokenizer.num_special_tokens_to_add(__UpperCAmelCase)
a_ = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase)
# Generate dummy inputs according to compute batch and sequence
a_ = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
a_ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
a_ = self._generate_dummy_images(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a_ = dict(
processor(
__UpperCAmelCase , text=__UpperCAmelCase , boxes=__UpperCAmelCase , return_tensors=__UpperCAmelCase , ))
return inputs
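# What compute_effective_axis_dimension does in the calls above (added; a hedged
# restatement of the expected behavior, not the library's documentation): a
# dynamic axis (-1) is replaced by a small fixed size so ONNX tracing cannot
# specialize on it, and num_token_to_add reserves room for special tokens.
assert compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0) == 2
assert compute_effective_axis_dimension(16, fixed_dimension=2, num_token_to_add=0) == 16
assert compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6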
| 210
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( PipelineTesterMixin , unittest.TestCase ):
a_ : Dict = ConsistencyModelPipeline
a_ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a_ : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
a_ : int = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCAmelCase__ ( self) ->Dict:
a_ = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def UpperCAmelCase__ ( self , __UpperCAmelCase=False) ->Optional[Any]:
if class_cond:
a_ = self.dummy_cond_unet
else:
a_ = self.dummy_uncond_unet
# Default to CM multistep sampler
a_ = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0) ->Any:
if str(__UpperCAmelCase).startswith("mps"):
a_ = torch.manual_seed(__UpperCAmelCase)
else:
a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a_ = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = ConsistencyModelPipeline(**__UpperCAmelCase)
a_ = pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_dummy_inputs(__UpperCAmelCase)
a_ = pipe(**__UpperCAmelCase).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
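

# Illustrative sketch (an addition, not part of the test file above): the one-step
# and multistep sampling paths these tests exercise, written as a standalone
# snippet. The checkpoint name follows the consistency-models release and should
# be treated as an assumption.
#
#     import torch
#     from diffusers import ConsistencyModelPipeline
#
#     pipe = ConsistencyModelPipeline.from_pretrained(
#         "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
#     ).to("cuda")
#     # one-step (distilled) sampling
#     image = pipe(num_inference_steps=1, timesteps=None, class_labels=0).images[0]
#     # multistep sampling over an explicit timestep schedule
#     image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]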
| 210
| 1
|
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Place each sentence of `x` on its own line (used for rougeLsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 272
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
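
# Illustrative note (an addition, not part of the module above): `_LazyModule`
# replaces this package in `sys.modules`, so importing `transformers.models.mluke`
# stays cheap; `tokenization_mluke` is only loaded when `MLukeTokenizer` is first
# looked up on the package, e.g.:
#
#     from transformers.models.mluke import MLukeTokenizer  # triggers the lazy load
#     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")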
| 272
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
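

# Illustrative sketch (an addition, not part of the test file above): the
# resolution behaviour these tests cover, reduced to the common case. For a
# checkpoint that bundles a feature extractor and a tokenizer, AutoProcessor
# returns the composite processor class:
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     # -> Wav2Vec2Processor; for tokenizer-only checkpoints a tokenizer is
#     # returned instead, as test_auto_processor_creates_tokenizer shows.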
| 514
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 514
| 1
|
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowerCamelCase__ :
def __init__( self : Any , A_ : int ):
'''simple docstring'''
if isinstance(A_ , A_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__lowercase = deepcopy(A_ )
elif os.path.exists(A_ ):
with io.open(A_ , """r""" , encoding="""utf-8""" ) as f:
__lowercase = json.load(A_ )
else:
try:
__lowercase = baseaa.urlsafe_baadecode(A_ ).decode("""utf-8""" )
__lowercase = json.loads(A_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
__lowercase = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
__lowercase = self.get_value("""zero_optimization.stage""" , -1 )
# offload
__lowercase = False
if self.is_zeroa() or self.is_zeroa():
__lowercase = set(["""cpu""", """nvme"""] )
__lowercase = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__lowercase = True
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Dict ):
'''simple docstring'''
__lowercase = self.config
# find the config node of interest if it exists
__lowercase = ds_key_long.split(""".""" )
__lowercase = nodes.pop()
for node in nodes:
__lowercase = config.get(A_ )
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : int , A_ : List[str]=None ):
'''simple docstring'''
__lowercase , __lowercase = self.find_config_node(A_ )
if config is None:
return default
return config.get(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Dict , A_ : str=False ):
'''simple docstring'''
__lowercase = self.config
# find the config node of interest if it exists
__lowercase = ds_key_long.split(""".""" )
for node in nodes:
__lowercase = config
__lowercase = config.get(A_ )
if config is None:
if must_exist:
raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : List[Any] ):
'''simple docstring'''
__lowercase = self.get_value(A_ )
return False if value is None else bool(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : List[Any] ):
'''simple docstring'''
__lowercase = self.get_value(A_ )
return False if value is None else not bool(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
return self._offload
class lowerCamelCase__ :
def __init__( self : Union[str, Any] , A_ : Union[str, Any] ):
'''simple docstring'''
__lowercase = engine
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : str , **A_ : Any ):
'''simple docstring'''
self.engine.backward(A_ , **A_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCamelCase__ ( _a ):
def __init__( self : Optional[int] , A_ : str ):
'''simple docstring'''
super().__init__(A_ , device_placement=A_ , scaler=A_ )
__lowercase = hasattr(self.optimizer , """overflow""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : str=None ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCamelCase__ ( _a ):
def __init__( self : List[str] , A_ : Dict , A_ : List[str] ):
'''simple docstring'''
super().__init__(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCamelCase__ :
def __init__( self : Dict , A_ : Any , A_ : int=0.0_01 , A_ : Union[str, Any]=0 , **A_ : Dict ):
'''simple docstring'''
__lowercase = params
__lowercase = lr
__lowercase = weight_decay
__lowercase = kwargs
class lowerCamelCase__ :
def __init__( self : Union[str, Any] , A_ : Union[str, Any] , A_ : Any=None , A_ : str=0 , **A_ : int ):
'''simple docstring'''
__lowercase = optimizer
__lowercase = total_num_steps
__lowercase = warmup_num_steps
__lowercase = kwargs
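

# Illustrative sketch (an addition, not part of the original module):
# `HfDeepSpeedConfig` accepts a dict, a path to a JSON file, or a base64
# encoded JSON string. The base64 path can be exercised like this:
#
#     ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#     encoded = base64.urlsafe_b64encode(json.dumps(ds_config).encode("utf-8"))
#     hf_ds_config = HfDeepSpeedConfig(encoded)
#     hf_ds_config.is_zero3()    # True
#     hf_ds_config.is_offload()  # True ("cpu" is a valid offload device)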
| 616
|
"""simple docstring"""
class lowerCamelCase__ :
def __init__( self : Optional[Any] , A_ : Dict , A_ : str , A_ : Any ):
'''simple docstring'''
__lowercase = None
__lowercase = None
__lowercase = graph
self._normalize_graph(A_ , A_ )
__lowercase = len(A_ )
__lowercase = None
def SCREAMING_SNAKE_CASE_ ( self : Dict , A_ : str , A_ : List[Any] ):
'''simple docstring'''
if sources is int:
__lowercase = [sources]
if sinks is int:
__lowercase = [sinks]
if len(A_ ) == 0 or len(A_ ) == 0:
return
__lowercase = sources[0]
__lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(A_ ) > 1 or len(A_ ) > 1:
__lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__lowercase = max_input_flow
__lowercase = 0
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__lowercase = max_input_flow
__lowercase = size - 1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Dict ):
'''simple docstring'''
__lowercase = algorithm(self )
class lowerCamelCase__ :
def __init__( self : Tuple , A_ : Optional[int] ):
'''simple docstring'''
__lowercase = flow_network
__lowercase = flow_network.verticesCount
__lowercase = flow_network.sourceIndex
__lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__lowercase = flow_network.graph
__lowercase = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
if not self.executed:
self._algorithm()
__lowercase = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
class lowerCamelCase__ ( _a ):
def __init__( self : Union[str, Any] , A_ : int ):
'''simple docstring'''
super().__init__(A_ )
# use this to save your result
__lowercase = -1
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase__ ( _a ):
def __init__( self : List[str] , A_ : Tuple ):
'''simple docstring'''
super().__init__(A_ )
__lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__lowercase = [0] * self.verticies_count
__lowercase = [0] * self.verticies_count
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__lowercase = 0
while i < len(A_ ):
__lowercase = vertices_list[i]
__lowercase = self.heights[vertex_index]
self.process_vertex(A_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A_ ) )
__lowercase = 0
else:
i += 1
__lowercase = sum(self.preflow[self.source_index] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[int] ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A_ , A_ )
self.relabel(A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[str] ):
'''simple docstring'''
__lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[Any] ):
'''simple docstring'''
__lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__lowercase = self.heights[to_index]
if min_height is not None:
__lowercase = min_height + 1
if __name__ == "__main__":
UpperCAmelCase__ =[0]
UpperCAmelCase__ =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase__ =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase__ =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase__ =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
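    # Sanity check (an illustrative addition): the only path from source 0 to
    # sink 3 in the graph above is 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8),
    # so the printed maximum flow should be min(7, 6, 8) = 6.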
| 616
| 1
|
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
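

if __name__ == "__main__":
    # Illustrative usage (an addition, not part of the original module):
    # "khoor zruog" is "hello world" Caesar-shifted by 3, so the recovered
    # shift should come out as 3.
    shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("khoor zruog")
    print(f"most likely shift: {shift} (chi-squared {chi_squared:.2f}): {plaintext}")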
| 721
|
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322
| 0
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
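
# Illustrative examples (an addition): "192.168.0.1" -> valid, "1.2.3" -> invalid
# (too few octets), "192.168.256.1" -> invalid. Note that this implementation
# caps octets at 254 rather than 255.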
| 362
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
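

# Illustrative sketch (an addition, not part of the test file above): the
# two-stage flow the integration test exercises -- a prior pipeline maps the
# prompt to image embeddings, and the img2img decoder then denoises the init
# image conditioned on them:
#
#     image_emb, zero_image_emb = pipe_prior(prompt, generator=generator).to_tuple()
#     image = pipeline(
#         prompt, image=init_image, image_embeds=image_emb,
#         negative_image_embeds=zero_image_emb, strength=0.2, output_type="np",
#     ).images[0]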
| 691
| 0
|
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default
    level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Emit a warning unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits the warning with the same message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
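

# Illustrative usage (an addition, not part of the original module):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("visible at info level")
#     logger.warning_once("emitted once per unique message")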
| 715
|
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
_UpperCamelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
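
# Illustrative usage (not part of the original test file): outside the test
# harness, the tokenizer under test is normally loaded from the Hub.
#
#     from transformers import AlbertTokenizer
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokenizer.tokenize("this is a test")  # expected: ['▁this', '▁is', '▁a', '▁test']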
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
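
# Illustrative usage (not part of the original module); in user code the config is
# imported from the library:
#
#     from transformers import BitConfig, BitModel
#
#     config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
#     model = BitModel(config)  # randomly initialized BiT-style backbone
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']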
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    # a sample audio file hosted on the Hub (the "spectogram" spelling is part of the repo id)
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
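
# Illustrative usage (not part of the original test file): the same checkpoint can
# be exercised through the audio-classification pipeline.
#
#     from transformers import pipeline
#
#     classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
#     preds = classifier("sample_audio.flac")  # path to a local audio file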
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
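
# Illustrative note (not part of the original file): with `_LazyModule`, the
# tokenizer module is only imported on first access, e.g.
#
#     from transformers import MLukeTokenizer  # requires sentencepiece
#
#     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")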
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a ViLT image processor and a BERT tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
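
# Illustrative usage (not part of the original module): pairing an image with a
# question, as in ViLT's VQA setup.
#
#     from PIL import Image
#     from transformers import ViltProcessor
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     encoding = processor(Image.open("cats.jpg"), "How many cats are there?", return_tensors="pt")
#     # encoding holds input_ids/attention_mask plus pixel_values (and pixel_mask)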
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: h(x) = theta_0 + theta_1*x1 + theta_2*x2 + theta_3*x3."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the given example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """
    Sum the error terms over the training set; `index == -1` corresponds to the
    bias parameter, otherwise each error is weighted by the matching feature.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Partial derivative of the cost function with respect to one parameter."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
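
# Worked example (illustrative): with the initial parameter_vector [2, 4, 1, 5],
# the hypothesis for the first training input (5, 2, 3) is
#     h = 2 + 4*5 + 1*2 + 5*3 = 39,
# so _error(0) = 39 - 15 = 24 before any descent steps are taken.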
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = len(lowercase )
for i in range(n - 1 ):
for j in range(i + 1 ,lowercase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if len(lowercase ) <= 1:
return arr, 0
_UpperCAmelCase = len(lowercase ) // 2
_UpperCAmelCase = arr[0:mid]
_UpperCAmelCase = arr[mid:]
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
_UpperCAmelCase , _UpperCAmelCase = _count_cross_inversions(lowercase ,lowercase )
_UpperCAmelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = 0
while i < len(lowercase ) and j < len(lowercase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowercase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ ,lowercase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ ,lowercase )
# an empty list should also have zero inversions
_UpperCAmelCase = []
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ ,lowercase )
if __name__ == "__main__":
main()
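
# Worked example (illustrative): merging p = [2, 5] with q = [1, 3] first sees
# 2 > 1, so every remaining element of p inverts with 1, adding len(p) - i = 2
# inversions in one step; counting whole blocks at a time is what makes the
# recursive version O(n log n) instead of O(n^2).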
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count how many tile totals t <= t_limit can form between 1 and n_limit
    distinct square laminae (a square outline with a centered square hole).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square to stay centered
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
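
# Worked example (illustrative): outer_width=3 with hole_width=1 uses
# 3*3 - 1*1 = 8 tiles, the smallest possible lamina, so count[8] is incremented
# exactly once and t = 8 contributes 1 to the returned sum.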
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFAutoModelForSeq2SeqLM,
        TFBlenderbotSmallForConditionalGeneration,
        TFBlenderbotSmallModel,
    )


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
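
# Illustrative usage (not part of the original test file): a short chat turn with
# the small checkpoint exercised above.
#
#     from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
#     inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
#     reply_ids = model.generate(**inputs)
#     print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))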
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
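
# Illustrative usage (not part of the original test file):
#
#     from PIL import Image
#     from transformers import MobileViTImageProcessor
#
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#     pixel_values = image_processor(Image.open("cat.jpg"), return_tensors="pt").pixel_values
#     # pixel_values has shape (1, 3, crop_height, crop_width)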
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
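
# Illustrative invocation (not part of the original script); the file name
# `local_sgd.py` is an assumption:
#
#     accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1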
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
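
# Illustrative invocation (not part of the original script); the flag names are
# assumed to come from `TensorFlowBenchmarkArguments` and the file name is an
# assumption:
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128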
import collections
import importlib.util
import os
import re
from pathlib import Path
__lowerCAmelCase : int = 'src/transformers'
# Matches is_xxx_available()
__lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__lowerCAmelCase : int = re.compile(R'^\s*try:')
# Catches a line with else:
__lowerCAmelCase : Tuple = re.compile(R'^\s*else:')
def a__ ( A_ ):
'''simple docstring'''
if _re_test_backend.search(A_ ) is None:
return None
__magic_name__ = [b[0] for b in _re_backend.findall(A_ )]
backends.sort()
return "_and_".join(A_ )
def a__ ( A_ ):
'''simple docstring'''
with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
__magic_name__ = f.readlines()
__magic_name__ = 0
while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A_ ):
return None
# First grab the objects without a specific backend in _import_structure
__magic_name__ = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
__magic_name__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A_ ):
__magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0]
__magic_name__ = re.findall("""\[([^\]]+)\]""", A_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
__magic_name__ = _re_import_struct_key_value.search(A_ )
if single_line_import_search is not None:
__magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0]
objects.extend(A_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
__magic_name__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__magic_name__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__magic_name__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__magic_name__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
__magic_name__ = lines[line_index]
if _re_import_struct_add_one.search(A_ ) is not None:
objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] )
elif _re_import_struct_add_many.search(A_ ) is not None:
__magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ )
__magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_between_brackets.search(A_ ) is not None:
__magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ )
__magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0]
objects.extend(A_ )
elif _re_quote_object.search(A_ ) is not None:
objects.append(_re_quote_object.search(A_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
__magic_name__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__magic_name__ = []
while (
line_index < len(A_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
__magic_name__ = lines[line_index]
__magic_name__ = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
__magic_name__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(A_ ):
# If the line is an if is_backend_available, we grab all objects associated.
__magic_name__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__magic_name__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__magic_name__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
__magic_name__ = lines[line_index]
__magic_name__ = _re_import.search(A_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
__magic_name__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
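

# Editorial illustration (not part of the original check script): a minimal sketch of
# the kind of report `analyze_results` produces. The object names below are invented
# sample data, not real transformers entries.
def _demo_analyze_results():
    import_dict = {"none": ["AutoConfig"], "torch": ["AutoModel", "AutoModel"]}
    type_hints = {"none": ["AutoConfig"], "torch": ["AutoModel", "AutoTokenizer"]}
    # Expect a duplicate warning for AutoModel and a difference report flagging
    # AutoTokenizer as present in TYPE_CHECKING but missing from _import_structure.
    return analyze_results(import_dict, type_hints)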


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
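

# Editorial illustration (invented path, not from the original script): how a file
# under the package root maps to a dotted submodule name in the loop above.
def _demo_submodule_name():
    short_path = str(Path("models") / "bert" / "modeling_bert.py")
    return short_path.replace(".py", "").replace(os.path.sep, ".")  # "models.bert.modeling_bert" on POSIX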


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 540
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
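

# Editorial sanity sketch: the branch above keys off substrings of the checkpoint
# URL/path, so a bare filename is enough to exercise it offline.
def _demo_get_dpt_config():
    config, expected_shape = get_dpt_config("dpt_hybrid-midas-501f0c75.pt")
    assert expected_shape == (1, 384, 384)
    return config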


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
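

# Editorial worked example (hypothetical key): the rules above take
# "pretrained.model.blocks.0.attn.proj.weight" to
# "dpt.encoder.layer.0.attention.output.dense.weight".
def _demo_rename_key():
    return rename_key("pretrained.model.blocks.0.attn.proj.weight")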


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
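

# Editorial shape note (assuming hidden_size = 768): the fused qkv weight is
# (3 * 768, 768); rows [0:768] become the query projection, [768:1536] the key,
# and the last 768 rows the value.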


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original model's weights to our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
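

# Editorial usage sketch (hypothetical local paths), matching the argparse flags below:
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large --show_prediction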
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 540
| 1
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
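
        # Editorial note: this dummy stands in for a real feature extractor; it only
        # needs a `.pixel_values` tensor and a chainable `.to(device)` for the
        # safety-checker-free pipeline tests below to run.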
def __snake_case ( self : List[Any] ):
'''simple docstring'''
snake_case : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Union[str, Any] =self.dummy_cond_unet
snake_case : Optional[Any] =DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=_snake_case, set_alpha_to_one=_snake_case, )
snake_case : Any =self.dummy_vae
snake_case : Optional[Any] =self.dummy_text_encoder
snake_case : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
snake_case : Tuple =StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
snake_case : List[Any] =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Dict ='''A painting of a squirrel eating a burger'''
snake_case : Dict =torch.Generator(device=_snake_case ).manual_seed(0 )
snake_case : Optional[Any] =sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
snake_case : List[str] =output.images
snake_case : Optional[int] =torch.Generator(device=_snake_case ).manual_seed(0 )
snake_case : int =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0]
snake_case : str =image[0, -3:, -3:, -1]
snake_case : str =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Any =np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : Any ='''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Dict =self.dummy_cond_unet
snake_case : Any =PNDMScheduler(skip_prk_steps=_snake_case )
snake_case : List[str] =self.dummy_vae
snake_case : Tuple =self.dummy_text_encoder
snake_case : int =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
snake_case : str =StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
snake_case : Optional[Any] =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : List[Any] ='''A painting of a squirrel eating a burger'''
snake_case : Union[str, Any] =torch.Generator(device=_snake_case ).manual_seed(0 )
snake_case : Tuple =sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
snake_case : List[str] =output.images
snake_case : Optional[Any] =torch.Generator(device=_snake_case ).manual_seed(0 )
snake_case : List[str] =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0]
snake_case : Optional[int] =image[0, -3:, -3:, -1]
snake_case : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Dict =np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : Dict =self.dummy_cond_unet
snake_case : Optional[int] =PNDMScheduler(skip_prk_steps=_snake_case )
snake_case : str =self.dummy_vae
snake_case : Any =self.dummy_text_encoder
snake_case : Optional[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
snake_case : Optional[Any] =unet.half()
snake_case : int =vae.half()
snake_case : Tuple =bert.half()
# make sure here that pndm scheduler skips prk
snake_case : str =StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
snake_case : Tuple =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Optional[Any] ='''A painting of a squirrel eating a burger'''
snake_case : Optional[int] =sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __snake_case ( self : Tuple ):
'''simple docstring'''
snake_case : int =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=_snake_case )
snake_case : Any =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case : Any =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : List[str] =(
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
snake_case : Optional[Any] =4_003_660_346
snake_case : List[Any] =7
# without safety guidance (sld_guidance_scale = 0)
snake_case : List[Any] =torch.manual_seed(_snake_case )
snake_case : Tuple =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
snake_case : Any =output.images
snake_case : Union[str, Any] =image[0, -3:, -3:, -1]
snake_case : Dict =[0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
snake_case : List[str] =torch.manual_seed(_snake_case )
snake_case : Tuple =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case : Optional[Any] =output.images
snake_case : List[Any] =image[0, -3:, -3:, -1]
snake_case : Tuple =[0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : str =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=_snake_case )
snake_case : Dict =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case : Optional[Any] =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Any ='''padme amidala taking a bath artwork, safe for work, no nudity'''
snake_case : int =2_734_971_755
snake_case : List[Any] =7
snake_case : List[Any] =torch.manual_seed(_snake_case )
snake_case : Optional[int] =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
snake_case : Optional[Any] =output.images
snake_case : Optional[int] =image[0, -3:, -3:, -1]
snake_case : List[Any] =[0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
snake_case : List[Any] =torch.manual_seed(_snake_case )
snake_case : Any =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case : Optional[Any] =output.images
snake_case : Any =image[0, -3:, -3:, -1]
snake_case : int =[0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : Any ):
'''simple docstring'''
snake_case : Dict =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
snake_case : List[Any] =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Optional[int] =(
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
snake_case : Any =1_044_355_234
snake_case : str =12
snake_case : Any =torch.manual_seed(_snake_case )
snake_case : List[Any] =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
snake_case : List[Any] =output.images
snake_case : Tuple =image[0, -3:, -3:, -1]
snake_case : Optional[Any] =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
snake_case : List[str] =torch.manual_seed(_snake_case )
snake_case : Dict =sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
snake_case : int =output.images
snake_case : Optional[int] =image[0, -3:, -3:, -1]
snake_case : int =np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 136
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
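

# Editorial worked example (invented strings; assumes utils_rag.exact_match_score
# normalizes case/punctuation as in the SQuAD evaluation script):
#   metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"]) -> 1
# i.e. the best score over all gold answers is kept.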


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
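

# Editorial worked example (invented provenance strings), with k = 2:
#   hypo      "doc1\tdoc2" -> {"doc1", "doc2"}
#   reference "doc2\tdoc9" -> {"doc2", "doc9"}
# The overlap is 1, so this pair contributes 1/2; the final score is the mean over
# all pairs, scaled to a percentage.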


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 136
| 1
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 124
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
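

# Editorial usage sketch (assumes the standard `pipeline` factory and a public
# checkpoint name):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="bert-base-uncased")
#   features = extractor("Hello world")  # nested lists, [1, num_tokens, hidden_size]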
| 124
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCAmelCase__ = "\nHuman: <<task>>\n\nAssistant: "
UpperCAmelCase__ = "huggingface-tools/default-prompts"
UpperCAmelCase__ = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns it contained in the corresponding file."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
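

# Editorial usage sketch (hypothetical agent name): download_prompt(None, "my_agent")
# fetches the default "run" template from huggingface-tools/default-prompts, whereas a
# string that contains whitespace is treated as the prompt itself and returned as-is.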
| 362
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def UpperCAmelCase_ ( self : List[str] , **lowerCamelCase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
config.update(**lowerCamelCase__ )
return config
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : Union[str, Any]=0 , **lowerCamelCase__ : Dict ) -> Any:
"""simple docstring"""
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' , lowerCamelCase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowercase = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
        output, new_output = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowercase = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Any=0 , **lowerCamelCase__ : Any ) -> Any:
"""simple docstring"""
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' , lowerCamelCase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowercase = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowercase = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
if scheduler is None:
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' , lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , '''set_timesteps''' ):
__lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
__lowercase = scheduler.timesteps[5]
__lowercase = scheduler.timesteps[6]
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowercase = self.full_loop(scheduler=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
__lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
__lowercase = DEISMultistepScheduler.from_config(scheduler.config )
__lowercase = self.full_loop(scheduler=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def UpperCAmelCase_ ( self : int ) -> List[Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type='''deis''' , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
__lowercase = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.full_loop()
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.full_loop(prediction_type='''v_prediction''' )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
| 362
| 1
|
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
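

# Editorial worked example (tiny invented workload): arrival [0, 1], burst [3, 1].
# P0 is the only arrival at t=0 and runs to t=3; P1 then runs for 1 unit.
# waiting_time == [0, 2] (P1 waits from t=1 to t=3) and turn_around == [3, 3].
def _demo_sjf():
    waiting = calculate_waitingtime([0, 1], [3, 1], 2)
    return waiting, calculate_turnaroundtime([3, 1], 2, waiting)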
if __name__ == "__main__":
print('[TEST CASE 01]')
__UpperCAmelCase : Optional[Any] = 4
__UpperCAmelCase : Union[str, Any] = [2, 5, 3, 7]
__UpperCAmelCase : List[Any] = [0, 0, 0, 0]
__UpperCAmelCase : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCAmelCase : Optional[Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 584
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 584
| 1
|
"""simple docstring"""
def one_pence() -> int:
    """There is exactly one way to make any non-negative amount with 1p coins."""
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make x pence from standard UK coins."""
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
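# Alternative bottom-up sketch (not in the original file): the standard
# coin-change dynamic program computes the same count iteratively and avoids
# the deep mutual recursion above. `solution_dp` is a name introduced here.
def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[0] = 1: one way to make zero
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


if __name__ == "__main__":
    # Both formulations must agree on the classic 200p target.
    assert solution_dp(200) == solution(200)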
| 100
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 100
| 1
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 0 by this convention, F(2) = 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
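# Closed-form sketch (not in the original solution): by Binet's formula the
# digit count of F(i) is approximately i*log10(phi) - log10(sqrt(5)) + 1, so
# the first index with n digits can be estimated without building the whole
# sequence. The function name below is introduced here for illustration.
def fibonacci_digits_index_closed_form(n: int) -> int:
    from math import ceil, log10, sqrt

    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(sqrt(5))) / log10(phi))


if __name__ == "__main__":
    # Sanity check against the iterative version for a small digit count.
    assert fibonacci_digits_index_closed_form(3) == fibonacci_digits_index(3)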
| 446
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == 'pt':
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == 'tf':
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 446
| 1
|
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Release the given objects and empty the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM error occurs."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
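# Usage sketch (not from the original file): decorate a training entry point
# whose first parameter is the batch size; the decorator retries with
# 64, 32, ... after each OOM. The body below is a placeholder assumption.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch size {batch_size}")
    # ... build dataloaders and run the actual training loop here ...


if __name__ == "__main__":
    # Note: call without arguments; the decorator injects batch_size itself.
    train()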
| 570
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 570
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
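# Usage sketch (assumption-laden, not from the original file): pairing the
# processor with the public "BridgeTower/bridgetower-base" checkpoint and a
# local image path, both illustrative.
if __name__ == '__main__':
    from PIL import Image

    processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')
    inputs = processor(images=Image.open('cat.jpg'), text='a photo of a cat', return_tensors='pt')
    print(sorted(inputs.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask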
| 517
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, backed by SubWordJapaneseTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token='<|endoftext|>',
        pad_token='<|endoftext|>',
        bos_token='<|startoftext|>',
        eos_token='<|endoftext|>',
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Japanese subword tokenizer that also normalizes URLs, emails, phone numbers, dates, prices and emoji."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while '<BLOCK><BLOCK>' in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji['emoji'].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == '<' and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline='\n'):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == '<|byte' and word[-2:] == '|>':
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == '<|emoji' and word[-2:] == '|>':
                words.append(self.emoji['emoji_inv'][word])
            elif word == '<SP>':
                words.append(' ')
            elif word == '<BR>':
                words.append(breakline)
            elif word == '<TAB>':
                words.append('\t')
            elif word == '<BLOCK>':
                words.append('▀')
            elif word == '<KIGOU>':
                words.append('ǀ')
            elif word == '<U2000U2BFF>':
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
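# Usage sketch (not in the original file): round-tripping a sentence through
# the tokenizer with the public "abeja/gpt-neox-japanese-2.7b" files, which
# are the checkpoint names this module's pretrained map refers to.
if __name__ == '__main__':
    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b')
    ids = tokenizer.encode('こんにちは、世界。')
    print(ids)
    print(tokenizer.decode(ids))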
| 517
| 1
|
from torch import nn
class ClassificationHead(nn.Module):
    """A single linear layer mapping hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
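# Minimal usage sketch (assumed shapes, not from the original file): a batch
# of 4 hidden states of width 768 mapped onto 5 classes.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(4, 768))
    print(logits.shape)  # torch.Size([4, 5])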
| 675
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for the given location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = 'beit'

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-4
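# Quick sanity sketch (not from the original file): a default config plus one
# override; the values printed are whatever the constructor stored.
if __name__ == '__main__':
    config = BeitConfig(image_size=384)
    print(config.model_type, config.image_size, config.num_hidden_layers)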
| 92
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 162
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type='sine',
        backbone='resnet50',
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output['backbone_config'] is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
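# Instantiation sketch (not from the original file): a DETR config backed by
# a Hugging Face ResNet config instead of the default timm backbone.
if __name__ == '__main__':
    config = DetrConfig(use_timm_backbone=False, num_queries=50)
    print(config.hidden_size, config.num_attention_heads)
    print(type(config.backbone_config).__name__)  # expected: ResNetConfig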
| 516
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)

        return {
            'image_start_token': start_token,
            'image_end_token': end_token,
            'sentence': sentence,
            'image': image,
            'label': label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs


def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        'Crime', 'Drama', 'Thriller', 'Action', 'Comedy', 'Romance', 'Documentary', 'Short',
        'Mystery', 'History', 'Family', 'Adventure', 'Fantasy', 'Sci-Fi', 'Western', 'Horror',
        'Sport', 'War', 'Music', 'Musical', 'Animation', 'Biography', 'Film-Noir',
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ])
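# Wiring sketch (assumptions only, not from the source): it presumes a local
# "train.jsonl" in MM-IMDB format and any Hugging Face tokenizer.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    dataset = JsonlDataset('train.jsonl', tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=128)
    loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
    text, mask, img, img_start, img_end, tgt = next(iter(loader))
    print(text.shape, img.shape, tgt.shape)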
| 516
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
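# Example invocation (the script name and paths are placeholders, not taken
# from the source):
#   python convert_ldm_original.py \
#       --checkpoint_path ldm.ckpt --config_path ldm.yaml --output_path ./ldm-pipeline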
| 377
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn='gelu_new',
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == 'pix2struct':
            config_dict = config_dict['text_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = 'pix2struct_vision_model'

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn='gelu_new',
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == 'pix2struct':
            config_dict = config_dict['vision_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = 'pix2struct'
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
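# Composition sketch (not from the original file): building the composite
# config from explicitly constructed sub-configs.
if __name__ == '__main__':
    text_cfg = Pix2StructTextConfig(num_layers=6)
    vision_cfg = Pix2StructVisionConfig(num_hidden_layers=6)
    cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
    print(cfg.text_config.num_layers, cfg.vision_config.num_hidden_layers)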
| 377
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = 'dpt'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type='project',
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != 'project':
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ['ignore', 'add', 'project']:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output['backbone_config'] is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
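# Hybrid-mode sketch (not from the original file): the default BiT backbone
# is created automatically when is_hybrid=True and no backbone_config is given.
if __name__ == '__main__':
    config = DPTConfig(is_hybrid=True)
    print(type(config.backbone_config).__name__)  # BitConfig
    print(config.backbone_featmap_shape)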
| 707
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE__ = bytes(UpperCamelCase__ , """utf-8""" )
with zstd.open(UpperCamelCase__ , """wb""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , """w""" ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE__ = input_paths[compression_format]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = """custom_cache"""
SCREAMING_SNAKE_CASE__ = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , UpperCamelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE__ = xz_file
SCREAMING_SNAKE_CASE__ = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve() )
assert cached_path(UpperCamelCase__ ) == text_file
# relative path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCamelCase__ ) == text_file
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
# relative path
SCREAMING_SNAKE_CASE__ = """./__missing_file__.txt"""
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
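# --- Illustrative usage sketch (assumption-labelled, not part of the test suite) ---
# What `extract_compressed_file=True` buys a caller of `cached_path`: the
# returned path points at the *decompressed* file inside the cache directory.
# The file names below are hypothetical, chosen only for the demonstration.
def _demo_cached_path_extract(tmp_dir):
    import gzip

    archive = os.path.join(tmp_dir, "hello.txt.gz")
    with gzip.open(archive, "wt") as f:
        f.write("hello world")
    download_config = DownloadConfig(cache_dir=os.path.join(tmp_dir, "cache"), extract_compressed_file=True)
    extracted = cached_path(archive, download_config=download_config)
    with open(extracted) as f:
        return f.read()  # -> "hello world"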
| 59
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
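# --- Illustrative note (an assumption-labelled sketch, not executable inside an __init__) ---
# The `_LazyModule` above defers the heavy torch/flax imports until a name is
# actually requested. From user code, the deferral is invisible:
#
#     from transformers import LongT5Config  # resolved lazily via _import_structure
#     config = LongT5Config()
#     print(config.model_type)  # -> "longt5"
#
# Only the first attribute access triggers the real `configuration_longt5` import.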
| 416
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
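# --- Illustrative usage sketch (assumption-labelled, not part of the tests) ---
# The save/reload round trip the fixtures above depend on, in isolation.
# "my_vocab.model" and "monolingual_vocab.txt" are hypothetical files; any
# valid SentencePiece model and monolingual vocab file work the same way.
def _demo_bartpho_round_trip(tmp_dir):
    tokenizer = BartphoTokenizer("my_vocab.model", "monolingual_vocab.txt", unk_token="<unk>")
    tokenizer.save_pretrained(tmp_dir)
    reloaded = BartphoTokenizer.from_pretrained(tmp_dir)
    return reloaded.tokenize("This is a test")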
| 416
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
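# --- Illustrative usage sketch (assumption-labelled, not part of the utilities) ---
# Wiring RegressionDataset/RegressionModel into a minimal plain-PyTorch loop.
# The real test suite drives these classes through `accelerate`; this sketch
# only shows the pieces fit together and that (a, b) ≈ (2, 3) is learnable.
def _demo_regression_training(num_epochs=5):
    dataset = RegressionDataset(length=128, seed=42)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel(a=0.0, b=0.0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(num_epochs):
        for batch in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
    return model.a.item(), model.b.item()  # ≈ (2.0, 3.0)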
| 716
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
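# --- Illustrative usage sketch (assumption-labelled, not part of the tests) ---
# The registration pattern exercised by the tests above, reduced to its core.
# `CustomConfig`/`CustomImageProcessor` are the fixture classes imported at the
# top of this file; with your own config/processor pair the calls are identical.
def _demo_register_custom_image_processor():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    # From here on, any checkpoint whose config resolves to CustomConfig is
    # loaded through AutoImageProcessor as a CustomImageProcessor.
    return CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content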
| 690
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # skip optimizer state; only the model weights are converted
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
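# --- Illustrative follow-up sketch (assumption-labelled, not part of the converter) ---
# The saved .pt file is an OrderedDict of tensors keyed by the PyTorch
# parameter names built above; a quick way to sanity-check a conversion:
def inspect_converted_checkpoint(path):
    state_dict = torch.load(path)
    for name, tensor in list(state_dict.items())[:5]:
        print(name, tuple(tensor.shape))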
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 276
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
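# --- Illustrative follow-up sketch (assumption-labelled, not part of the exporter) ---
# Running the exported graph standalone with onnxruntime. "BART.onnx" matches
# the default output name used in `main()`; the value 2 assumed for
# `decoder_start_token_id` should be read from `model.config` in real use.
def _demo_run_exported_model(onnx_path="BART.onnx", model_name="facebook/bart-base"):
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="pt")
    sess = onnxruntime.InferenceSession(onnx_path)
    ort_out = sess.run(
        None,
        {
            "input_ids": inputs["input_ids"].cpu().numpy(),
            "attention_mask": inputs["attention_mask"].cpu().numpy(),
            "num_beams": np.array(4),
            "max_length": np.array(5),
            "decoder_start_token_id": np.array(2),
        },
    )
    return tokenizer.batch_decode(ort_out[0], skip_special_tokens=True)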
| 276
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 32
|
def binary_and(a: int, b: int) -> str:
    """
    Take in two integers, return their bitwise AND as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []

    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to OneFormerImageProcessor,
        assuming `do_resize` is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
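# --- Illustrative sketch (assumption-labelled, not part of the tests) ---
# The RLE format asserted in test_binary_mask_to_rle: pairs of
# (1-indexed start, run length) for each run of ones in the flattened mask,
# which is how [21, 45, ...] arises from the 20x50 demo mask above. A minimal
# NumPy re-derivation of that encoding:
def _demo_binary_mask_to_rle(mask):
    pixels = np.concatenate([[0], np.asarray(mask).flatten(), [0]])
    changes = np.where(pixels[1:] != pixels[:-1])[0] + 1  # positions where runs flip
    changes[1::2] -= changes[::2]  # turn (start, end) pairs into (start, length)
    return list(changes)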
| 466
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _UpperCAmelCase ( __A : int , __A : Tuple="shi-labs/oneformer_demo" ):
with open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) as f:
a_ : Optional[Any] = json.load(__A )
a_ : List[Any] = {}
a_ : List[Any] = []
a_ : Tuple = []
for key, info in class_info.items():
a_ : Tuple = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(__A ) )
a_ : Optional[Any] = thing_ids
a_ : str = class_names
return metadata
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=7 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=30 , __SCREAMING_SNAKE_CASE : str=400 , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Optional[int]=255 , __SCREAMING_SNAKE_CASE : List[Any]="shi-labs/oneformer_demo" , __SCREAMING_SNAKE_CASE : List[str]="ade20k_panoptic.json" , __SCREAMING_SNAKE_CASE : List[Any]=10 , ) -> Dict:
a_ : int = parent
a_ : Optional[Any] = batch_size
a_ : str = num_channels
a_ : Tuple = min_resolution
a_ : List[Any] = max_resolution
a_ : List[Any] = do_resize
a_ : Union[str, Any] = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size
a_ : Dict = do_normalize
a_ : Union[str, Any] = image_mean
a_ : Dict = image_std
a_ : int = class_info_file
a_ : List[Any] = prepare_metadata(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Optional[int] = num_text
a_ : Any = repo_path
# for the post_process_functions
a_ : List[str] = 2
a_ : Tuple = 10
a_ : Union[str, Any] = 10
a_ : Dict = 3
a_ : int = 4
a_ : Optional[Any] = num_labels
a_ : Union[str, Any] = do_reduce_labels
a_ : Tuple = ignore_index
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the height and width the image processor is expected to
        # produce, given do_resize=True and a "shortest_edge" size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=True, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="np")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row: ones from column 20 onwards
        fake_binary_mask[1, :15] = 1  # second row: ones in the first 15 columns
        fake_binary_mask[5, :10] = 1  # sixth row: ones in the first 10 columns

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
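
    # Why those values hold (assuming the (1-indexed start, run length) pair
    # convention binary_mask_to_rle uses over the row-major flattened mask):
    # row 0 ends with 30 ones and row 1 begins with 15 ones, so they merge into
    # one run that starts at pixel 21 (rle[0]) and spans 45 pixels (rle[1]);
    # the run in row 5 contributes the second pair, hence len(rle) == 4.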

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
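

# A compact sketch of the post-processing path the three tests above exercise
# (model_outputs is hypothetical; a real run would take it from OneFormer's
# forward pass, and the threshold value here is only illustrative):
#     processor = OneFormerImageProcessor(
#         num_labels=10,
#         class_info_file="ade20k_panoptic.json",
#         num_text=10,
#         repo_path="shi-labs/oneformer_demo",
#     )
#     results = processor.post_process_panoptic_segmentation(model_outputs, threshold=0.5)
#     results[0]["segmentation"]   # (height, width) map of segment ids
#     results[0]["segments_info"]  # list of dicts describing each segment
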
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
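

# Minimal usage sketch (building a config downloads no weights; the overrides
# below simply restate the defaults for clarity):
#     config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#     config.model_type          # -> "poolformer"
#     config.num_encoder_blocks  # -> 4
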
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
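

# Minimal usage sketch ("t5-small"-scale hyperparameters; the gated-gelu value
# exercises the backwards-compatibility remapping in __init__ above):
#     config = T5Config(d_model=512, num_layers=6, num_heads=8, feed_forward_proj="gated-gelu")
#     config.dense_act_fn  # -> "gelu_new"
#     config.is_gated_act  # -> True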