"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class snake_case ( PretrainedConfig ):
model_type = '''encodec'''
def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4_0_0_0 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=1_2_8 , num_filters=3_2 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_0_2_4 , codebook_dim=None , use_conv_shortcut=True , **kwargs , )-> None:
'''simple docstring'''
self.target_bandwidths = target_bandwidths
self.sampling_rate = sampling_rate
self.audio_channels = audio_channels
self.normalize = normalize
self.chunk_length_s = chunk_length_s
self.overlap = overlap
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios
self.norm_type = norm_type
self.kernel_size = kernel_size
self.last_kernel_size = last_kernel_size
self.residual_kernel_size = residual_kernel_size
self.dilation_growth_rate = dilation_growth_rate
self.use_causal_conv = use_causal_conv
self.pad_mode = pad_mode
self.compress = compress
self.num_lstm_layers = num_lstm_layers
self.trim_right_ratio = trim_right_ratio
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}")
super().__init__(**kwargs)
@property
def chunk_length( self : Any)-> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def chunk_stride( self : str)-> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def frame_rate( self : Optional[Any])-> int:
'''simple docstring'''
hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def num_quantizers( self : Optional[int])-> int:
'''simple docstring'''
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
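# --- Hedged usage sketch (editor addition, not part of the original dataset row) ---
# Exercises the derived properties above; assumes the constructor restoration in
# this file and that `transformers` is installed for PretrainedConfig.
if __name__ == "__main__":
    config = snake_case(chunk_length_s=1.0, overlap=0.01)
    print(config.chunk_length)    # 1.0 s * 24_000 Hz -> 24_000 samples
    print(config.chunk_stride)    # max(1, int(0.99 * 24_000)) -> 23_760
    print(config.frame_rate)      # ceil(24_000 / prod([8, 5, 4, 2])) -> 75
    print(config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) -> 32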
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
'''simple docstring'''
mode = '''sequence-classification'''
def __init__( self , hparams):
"""simple docstring"""
if type(hparams) == dict:
hparams = Namespace(**hparams)
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
super().__init__(hparams , num_labels , self.mode)
def forward( self , **inputs):
"""simple docstring"""
return self.model(**inputs)
def training_step( self , batch , batch_idx):
"""simple docstring"""
inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
outputs = self(**inputs)
loss = outputs[0]
lr_scheduler = self.trainer.lr_schedulers[0]["""scheduler"""]
tensorboard_logs = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def prepare_data( self):
"""simple docstring"""
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ["train", "dev"]:
cached_features_file = self._feature_file(mode)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , cached_features_file)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
examples = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , cached_features_file)
torch.save(features , cached_features_file)
def get_dataloader( self , mode , batch_size , shuffle = False):
"""simple docstring"""
mode = """dev""" if mode == """test""" else mode
cached_features_file = self._feature_file(mode)
logger.info("""Loading features from cached file %s""" , cached_features_file)
features = torch.load(cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels) , batch_size=batch_size , shuffle=shuffle , )
def validation_step( self , batch , batch_idx):
"""simple docstring"""
inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
outputs = self(**inputs)
tmp_eval_loss , logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _eval_end( self , outputs):
"""simple docstring"""
val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
preds = np.argmax(preds , axis=1)
elif self.hparams.glue_output_mode == "regression":
preds = np.squeeze(preds)
out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0)
preds_list = [[] for _ in range(out_label_ids.shape[0])]
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
results = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids)}
ret = dict(results.items())
ret["""log"""] = results
return ret, preds_list, out_label_list
def validation_epoch_end( self , outputs):
"""simple docstring"""
ret , preds , targets = self._eval_end(outputs)
logs = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def test_epoch_end( self , outputs):
"""simple docstring"""
ret , predictions , targets = self._eval_end(outputs)
logs = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def add_model_specific_args( parser , root_dir):
"""simple docstring"""
BaseTransformer.add_model_specific_args(parser , root_dir)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=int , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=str , required=True , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def main() -> Optional[Any]:
'''simple docstring'''
parser = argparse.ArgumentParser()
add_generic_args(parser , os.getcwd() )
parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
args = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
args.output_dir = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
model = GLUETransformer(args )
trainer = generic_train(model , args )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
model = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(model )
if __name__ == "__main__":
main()
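# --- Hedged usage sketch (editor addition, not from the original row) ---
# Typical invocation of this script in the transformers PyTorch-Lightning
# examples layout; the file name `run_pl_glue.py` and the paths are assumptions:
#
#   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir ./glue_data/MRPC --output_dir ./results --gpus 1 --do_predict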
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionInstructPixaPixPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ) -> dict:
"""simple docstring"""
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
scheduler = PNDMScheduler(skip_prk_steps=True )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> dict:
"""simple docstring"""
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def test_stable_diffusion_pix2pix_default_case( self ) -> None:
"""simple docstring"""
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = sd_pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_negative_prompt( self ) -> None:
"""simple docstring"""
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
negative_prompt = 'french fries'
output = sd_pipe(**inputs , negative_prompt=negative_prompt )
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_multiple_init_images( self ) -> None:
"""simple docstring"""
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['prompt'] = [inputs['prompt']] * 2
image = np.array(inputs['image'] ).astype(np.float32 ) / 255.0
image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
image = image / 2 + 0.5
image = image.permute(0 , 3 , 1 , 2 )
inputs['image'] = image.repeat(2 , 1 , 1 , 1 )
image = sd_pipe(**inputs ).images
image_slice = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_euler( self ) -> None:
"""simple docstring"""
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components['scheduler'] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' )
sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = sd_pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
rounded_slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in rounded_slice] ) )
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_inference_batch_single_identical( self ) -> None:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def test_latents_input( self ) -> None:
"""simple docstring"""
components = self.get_dummy_components()
pipe = StableDiffusionInstructPixaPixPipeline(**components )
pipe.image_processor = VaeImageProcessor(do_resize=False , do_normalize=False )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
out = pipe(**self.get_dummy_inputs_by_type(torch_device , input_image_type='pt' ) )[0]
vae = components['vae']
inputs = self.get_dummy_inputs_by_type(torch_device , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
out_latents_inputs = pipe(**inputs )[0]
max_diff = np.abs(out - out_latents_inputs ).max()
self.assertLess(max_diff , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def tearDown( self ) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , seed=0 ) -> dict:
"""simple docstring"""
generator = torch.manual_seed(seed )
image = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
inputs = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def test_stable_diffusion_pix2pix_default( self ) -> None:
"""simple docstring"""
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_k_lms( self ) -> None:
"""simple docstring"""
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_ddim( self ) -> None:
"""simple docstring"""
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_intermediate_state( self ) -> None:
"""simple docstring"""
number_of_steps = 0
def callback_fn(step , timestep , latents ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ) -> None:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs()
_ = pipe(**inputs )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def test_stable_diffusion_pix2pix_pipeline_multiple_of_8( self ) -> None:
"""simple docstring"""
inputs = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
inputs['image'] = inputs['image'].resize((504, 504) )
model_id = 'timbrooks/instruct-pix2pix'
pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
model_id , safety_checker=None , )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
output = pipe(**inputs )
image = output.images[0]
image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( DeformableDetrImageProcessor ):
'''simple docstring'''
def __init__( self , *args , **kwargs):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs)
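# --- Hedged migration sketch (editor addition) ---
# Following the deprecation warning above, new code should construct the image
# processor directly; the checkpoint id below is an assumption for illustration:
if __name__ == "__main__":
    processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
    print(type(processor).__name__)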
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase__ ( BertTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase__ ( BertTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
questions ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(
questions ,text_pair ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
titles = titles if not isinstance(titles ,str ) else [titles]
texts = texts if not isinstance(texts ,str ) else [texts]
n_passages = len(titles )
questions = questions if not isinstance(questions ,str ) else [questions] * n_passages
if len(titles ) != len(texts ):
raise ValueError(
F'There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.' )
encoded_question_and_titles = super().__call__(questions ,titles ,padding=False ,truncation=False )['input_ids']
encoded_texts = super().__call__(texts ,add_special_tokens=False ,padding=False ,truncation=False )['input_ids']
encoded_inputs = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles ,encoded_texts )
]
}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
encoded_inputs['attention_mask'] = attention_mask
return self.pad(encoded_inputs ,padding=padding ,max_length=max_length ,return_tensors=return_tensors )
def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 ,):
'''simple docstring'''
input_ids = reader_input['input_ids']
start_logits , end_logits , relevance_logits = reader_output[:3]
n_passages = len(relevance_logits )
sorted_docs = sorted(range(n_passages ) ,reverse=True ,key=relevance_logits.__getitem__ )
nbest_spans_predictions = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
passage_offset = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id )
else:
sequence_len = len(sequence_ids )
best_spans = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=max_answer_length ,top_spans=num_spans_per_passage ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=doc_id ,start_index=start_index ,end_index=end_index ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(nbest_spans_predictions ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans ,):
'''simple docstring'''
scores = []
for start_index, start_score in enumerate(start_logits ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
scores = sorted(scores ,key=lambda x : x[1] ,reverse=True )
chosen_span_intervals = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
length = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(chosen_span_intervals ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class lowercase__ ( CustomDPRReaderTokenizerMixin , BertTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ['''input_ids''', '''attention_mask''']
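# --- Hedged usage sketch (editor addition) ---
# Exercises the question/title/text `__call__` contract documented in
# CUSTOM_DPR_READER_DOCSTRING; assumes the facebook/dpr-reader-single-nq-base
# checkpoint is reachable. `lowercase__` is the reader tokenizer class above.
if __name__ == "__main__":
    reader_tokenizer = lowercase__.from_pretrained('facebook/dpr-reader-single-nq-base')
    encoded = reader_tokenizer(
        questions='What is love?',
        titles='Haddaway',
        texts='"What Is Love" is a 1993 song recorded by Haddaway.',
    )
    print(len(encoded['input_ids']))  # one passage -> one row of token ids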
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_instructblip"""] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
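# --- Hedged illustration (editor addition) ---
# The _LazyModule indirection above keeps `import transformers` cheap: names in
# _import_structure only resolve to real modules on first attribute access.
if __name__ == "__main__":
    import importlib
    mod = importlib.import_module("transformers.models.instructblip")
    print(type(mod).__name__)  # _LazyModule until an attribute is touched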
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __snake_case ( PretrainedConfig ):
model_type = '''transfo-xl'''
keys_to_ignore_at_inference = ['''mems''']
attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=26_7735 , cutoffs=[2_0000, 4_0000, 20_0000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ) -> None:
'''simple docstring'''
self.vocab_size = vocab_size
self.cutoffs = []
self.cutoffs.extend(cutoffs )
if proj_share_all_but_first:
self.tie_projs = [False] + [True] * len(self.cutoffs )
else:
self.tie_projs = [False] + [False] * len(self.cutoffs )
self.d_model = d_model
self.d_embed = d_embed
self.d_head = d_head
self.d_inner = d_inner
self.div_val = div_val
self.pre_lnorm = pre_lnorm
self.n_layer = n_layer
self.n_head = n_head
self.mem_len = mem_len
self.same_length = same_length
self.attn_type = attn_type
self.clamp_len = clamp_len
self.sample_softmax = sample_softmax
self.adaptive = adaptive
self.dropout = dropout
self.dropatt = dropatt
self.untie_r = untie_r
self.init = init
self.init_range = init_range
self.proj_init_std = proj_init_std
self.init_std = init_std
self.layer_norm_epsilon = layer_norm_epsilon
super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
def max_position_embeddings( self ) -> int:
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def max_position_embeddings( self , value ) -> None:
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
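# --- Hedged usage sketch (editor addition) ---
# Assumes the constructor restoration above; `attribute_map` aliases the
# standard config names onto Transfo-XL's own.
if __name__ == "__main__":
    config = __snake_case()                # the (obfuscated) TransfoXLConfig class above
    print(config.hidden_size)              # aliased to d_model -> 1024
    print(config.max_position_embeddings)  # -1: no fixed sequence length limit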
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def setUp( self):
"""simple docstring"""
self.image_size = (3, 32, 128)
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab))))
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(vocab_tokens) + """\n""")
image_processor_map = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(image_processor_map , fp)
def get_tokenizer( self , **kwargs):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs)
def get_image_processor( self , **kwargs):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
def tearDown( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs( self):
"""simple docstring"""
image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1))
return image_input
def test_save_load_pretrained_default( self):
"""simple docstring"""
tokenizer = self.get_tokenizer()
image_processor = self.get_image_processor()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
processor.save_pretrained(self.tmpdirname)
processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def test_save_load_pretrained_additional_features( self):
"""simple docstring"""
tokenizer = self.get_tokenizer()
image_processor = self.get_image_processor()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
processor = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def test_image_processor( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input , return_tensors="""np""")
input_processor = processor(images=image_input , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def test_tokenizer( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = """test"""
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def test_processor( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = """test"""
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_char_decode( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.char_decode(predictions)
decoded_tok = tokenizer.batch_decode(predictions)
decode_strs = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(decoded_processor , decode_strs)
def test_model_input_names( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = None
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def test_batch_decode( self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
char_input = torch.randn(1 , 27 , 38)
bpe_input = torch.randn(1 , 27 , 50257)
wp_input = torch.randn(1 , 27 , 30522)
results = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
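# --- Hedged usage sketch (editor addition) ---
# Mirrors the batch_decode test above with the public checkpoint; the id
# "alibaba-damo/mgp-str-base" is an assumption, and network access is required.
if __name__ == "__main__":
    import torch
    from transformers import MgpstrProcessor
    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    outputs = processor.batch_decode([torch.randn(1, 27, 38), torch.randn(1, 27, 50257), torch.randn(1, 27, 30522)])
    print(list(outputs.keys()))  # generated_text, scores, char_preds, bpe_preds, wp_preds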
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( args ):
"""simple docstring"""
return lowerCAmelCase__()
def download_command_factory( args ):
"""simple docstring"""
return lowerCAmelCase__(args.accelerate_config_file )
class lowerCAmelCase__ ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def register_subcommand( parser : List[str] ) -> None:
"""simple docstring"""
download_parser = parser.add_parser("""env""" )
download_parser.set_defaults(func=info_command_factory )
download_parser.add_argument(
"""--accelerate-config_file""" , default=None , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=download_command_factory )
def __init__( self , accelerate_config_file , *args ) -> None:
"""simple docstring"""
self._accelerate_config_file = accelerate_config_file
def run( self ) -> dict:
"""simple docstring"""
safetensors_version = """not installed"""
if is_safetensors_available():
import safetensors
safetensors_version = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
accelerate_version = """not installed"""
accelerate_config = accelerate_config_str = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
accelerate_version = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
accelerate_config_str = (
"""\n""".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config , dict )
else f'\t{accelerate_config}'
)
pt_version = """not installed"""
pt_cuda_available = """NA"""
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
tf_version = """not installed"""
tf_cuda_available = """NA"""
if is_tf_available():
import tensorflow as tf
tf_version = tf.__version__
try:
# deprecated in v2.1
tf_cuda_available = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
tf_cuda_available = bool(tf.config.list_physical_devices("""GPU""" ) )
flax_version = """not installed"""
jax_version = """not installed"""
jaxlib_version = """not installed"""
jax_backend = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
flax_version = flax.__version__
jax_version = jax.__version__
jaxlib_version = jaxlib.__version__
jax_backend = jax.lib.xla_bridge.get_backend().platform
info = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'{safetensors_version}',
"""Accelerate version""": f'{accelerate_version}',
"""Accelerate config""": f'{accelerate_config_str}',
"""PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
"""Tensorflow version (GPU?)""": f'{tf_version} ({tf_cuda_available})',
"""Flax version (CPU?/GPU?/TPU?)""": f'{flax_version} ({jax_backend})',
"""Jax version""": f'{jax_version}',
"""JaxLib version""": f'{jaxlib_version}',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(info ) )
return info
@staticmethod
def format_dict( d : Dict ) -> str:
"""simple docstring"""
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id( self):
"""simple docstring"""
token = """<pad>"""
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def test_get_vocab( self):
"""simple docstring"""
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(vocab_keys) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
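# Usage sketch for the tokenizer exercised above (the checkpoint name matches the
# slow tests; the ids shown are the ones asserted in test_tokenization_base_easy_symbols):
#
#     tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     tokenizer.encode("Hello World!")  # [0, 35378, 6661, 38, 2]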
| 272 | 0 |
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n as a list of factors in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(37)
    [37]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
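# Additional usage examples (these hold for the implementation above):
if __name__ == "__main__":
    assert prime_factors(60) == [2, 2, 3, 5]  # 2 * 2 * 3 * 5 == 60
    assert prime_factors(13) == [13]  # a prime factors as itself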
| 313 | '''simple docstring'''
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of a modulo m.

    Raises ValueError when a and m are not coprime, i.e. no inverse exists.
    """
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    # Extended Euclidean algorithm.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
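# Usage sketch: gcd via the Euclidean algorithm, and the inverse of 3 mod 26
# (9 works because (3 * 9) % 26 == 1).
if __name__ == "__main__":
    assert gcd(48, 18) == 6
    assert mod_inverse(3, 26) == 9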
| 272 | 0 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
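# For context, a minimal sketch of the helper this test exercises. The real
# implementation lives in file_transfer.send_file; the body below is
# reconstructed from the assertions above, and the host, port, buffer size and
# the private name are assumptions made for illustration only.
def _send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 12312))  # illustrative host/port
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the receiver to announce itself
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # the mocked read above returns 1 once, then None
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()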
| 63 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""Time step embedding module that projects timesteps through two dense layers."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""Wrapper around sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
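# Shape sketch: embedding a batch of 4 scalar timesteps into 32 dimensions.
#
#     import jax.numpy as jnp
#     emb = get_sinusoidal_embeddings(jnp.array([0, 1, 2, 3]), embedding_dim=32)
#     assert emb.shape == (4, 32)  # half sin features, half cos features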
| 272 | 0 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
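# Illustrative invocation (the script file name and all paths below are made
# up for the example; only the flags themselves are defined above):
#
#     python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#         --checkpoint_path /path/to/checkpoint_best.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-2-speech2text2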
| 310 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
def A (self : List[str] ):
A = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
A = en_de_tokenizer(["""I am a small frog"""] , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
A = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(__lowerCAmelCase , batch.input_ids[0] )
A = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__lowerCAmelCase )
A = [x.name for x in Path(__lowerCAmelCase ).glob("""*""" )]
self.assertIn("""source.spm""" , __lowerCAmelCase )
MarianTokenizer.from_pretrained(__lowerCAmelCase )
def A (self : Optional[int] ):
A = self.get_tokenizer()
A = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def A (self : Dict ):
A = self.get_tokenizer()
A = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def A (self : Optional[Any] ):
A = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def A (self : List[str] ):
A = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
A = """Tämä on testi"""
A = """This is a test"""
A = [76, 7, 2047, 2]
A = [69, 12, 11, 940, 2]
A = tokenizer(__lowerCAmelCase ).input_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A = tokenizer(text_target=__lowerCAmelCase ).input_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
A = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 258 | '''simple docstring'''
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Return the least cuboid size M such that the number of cuboids with
    integer shortest-path solutions first exceeds ``limit`` (Project Euler 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | 0 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a checkpoint (debugging helper)."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV parameter into the [num_splits * num_heads * hidden_size, :] layout."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
_a = {}
# old versions did not store training args
_a = input_state_dict.get('''args''' , _A)
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_a = ds_args.padded_vocab_size
_a = ds_args.max_position_embeddings
_a = ds_args.hidden_size
_a = ds_args.num_layers
_a = ds_args.num_attention_heads
_a = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_a = config.n_head
# The hidden_size per head.
_a = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_a = input_state_dict['''checkpoint_version''']
else:
_a = 0.0
# The model.
_a = input_state_dict['''model''']
# The language model.
_a = model['''language_model''']
# The embeddings.
_a = lm['''embedding''']
# The word embeddings.
_a = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
_a = word_embeddings[: config.vocab_size, :]
_a = word_embeddings
# The position embeddings.
_a = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_a = pos_embeddings.size(0)
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''')
# Store the position embeddings.
_a = pos_embeddings
# The transformer.
_a = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
_a = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''')
# The simple map of names for "automated" rules.
_a = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_a = layer_re.match(_A)
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_a = int(m.group(1))
# The name of the operation.
_a = m.group(2)
# Is it a weight or a bias?
_a = m.group(3)
# The name of the layer.
_a = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm'''):
_a = '''ln_1''' if op_name.startswith('''input''') else '''ln_2'''
_a = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_a = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa)).view(
1 , 1 , _A , _A)
_a = causal_mask
# Insert a "dummy" tensor for masked_bias.
_a = torch.tensor(-1e4 , dtype=torch.floataa)
_a = masked_bias
_a = fix_query_key_value_ordering(_A , _A , 3 , _A , _A)
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_a = out_val.transpose(0 , 1).contiguous()
# Store.
_a = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_a = fix_query_key_value_ordering(_A , _A , 3 , _A , _A)
# Store. No change of shape.
_a = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_a = megatron_to_transformers[op_name]
_a = val.transpose(0 , 1)
# Copy the bias.
elif weight_or_bias == "bias":
_a = megatron_to_transformers[op_name]
_a = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_a = transformer['''final_layernorm.weight''']
_a = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
_a = word_embeddings
# It should be done!
return output_state_dict
def lowerCAmelCase ():
"""simple docstring"""
_a = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''')
parser.add_argument(
'''path_to_checkpoint''' , type=_A , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=_A , help='''An optional config json file describing the pre-trained model.''' , )
_a = parser.parse_args()
# Extract the basename.
_a = os.path.dirname(args.path_to_checkpoint)
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''')
if args.path_to_checkpoint.endswith('''.zip'''):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''') as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''') as pytorch_dict:
_a = torch.load(_A , map_location='''cpu''')
else:
_a = torch.load(args.path_to_checkpoint , map_location='''cpu''')
_a = input_state_dict.get('''args''' , _A)
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_a = '''gelu_fast'''
elif ds_args.openai_gelu:
_a = '''gelu_new'''
else:
_a = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
_a = '''gelu_new'''
# Spell out all parameters in case the defaults change.
_a = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
_a = GPTaConfig.from_json_file(args.config_file)
_a = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''')
_a = convert_megatron_checkpoint(_A , _A , _A)
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
_a = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_a = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
_a = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''')
else:
_a = '''gpt2'''
_a = AutoTokenizer.from_pretrained(_A)
_a = type(_A).__name__
_a = tokenizer_class
# Store the config to file.
print('''Saving config''')
config.save_pretrained(_A)
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''')
tokenizer.save_pretrained(_A)
# Store the state_dict to file.
_a = os.path.join(_A , '''pytorch_model.bin''')
print(F'''Saving checkpoint to \"{output_checkpoint_file}\"''')
torch.save(_A , _A)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 211 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
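# Behavior sketch: with _LazyModule, importing this package stays cheap because
# the heavy modeling files are only loaded on first attribute access, e.g.:
#
#     from transformers.models.resnet import ResNetModel  # triggers modeling_resnet import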
| 272 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of
            `DiagonalGaussianDistribution`.
    """

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def lowerCamelCase__ ( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[str]=False ) -> int:
'''simple docstring'''
if isinstance(__lowerCAmelCase , (Encoder, Decoder) ):
SCREAMING_SNAKE_CASE_: str =value
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Optional[int] = True ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =use_tiling
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
self.enable_tiling(__lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =True
def lowerCamelCase__ ( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] ={}
def fn_recursive_add_processors(lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
if hasattr(__lowerCAmelCase , """set_processor""" ):
SCREAMING_SNAKE_CASE_: Optional[int] =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , __lowerCAmelCase , __lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return processors
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =len(self.attn_processors.keys() )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
if hasattr(__lowerCAmelCase , """set_processor""" ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
module.set_processor(__lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __lowerCAmelCase , __lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] = True ) -> Tuple:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__lowerCAmelCase , return_dict=__lowerCAmelCase )
if self.use_slicing and x.shape[0] > 1:
SCREAMING_SNAKE_CASE_: Dict =[self.encoder(__lowerCAmelCase ) for x_slice in x.split(1 )]
SCREAMING_SNAKE_CASE_: Any =torch.cat(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: str =self.encoder(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.quant_conv(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =DiagonalGaussianDistribution(__lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] = True ) -> Optional[int]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__lowerCAmelCase , return_dict=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =self.post_quant_conv(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =self.decoder(__lowerCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
@apply_forward_hook
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] = True ) -> List[Any]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
SCREAMING_SNAKE_CASE_: Any =[self._decode(__lowerCAmelCase ).sample for z_slice in z.split(1 )]
SCREAMING_SNAKE_CASE_: int =torch.cat(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: int =self._decode(__lowerCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__lowerCAmelCase )
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Any = True ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE_: Any =int(self.tile_latent_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE_: Any =self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
SCREAMING_SNAKE_CASE_: int =[]
for i in range(0 , x.shape[2] , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =[]
for j in range(0 , x.shape[3] , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.encoder(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.quant_conv(__lowerCAmelCase )
row.append(__lowerCAmelCase )
rows.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for i, row in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for j, tile in enumerate(__lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE_: Any =self.blend_v(rows[i - 1][j] , __lowerCAmelCase , __lowerCAmelCase )
if j > 0:
SCREAMING_SNAKE_CASE_: List[Any] =self.blend_h(row[j - 1] , __lowerCAmelCase , __lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__lowerCAmelCase , dim=3 ) )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.cat(__lowerCAmelCase , dim=2 )
SCREAMING_SNAKE_CASE_: List[str] =DiagonalGaussianDistribution(__lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] = True ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE_: Any =int(self.tile_sample_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE_: Dict =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
SCREAMING_SNAKE_CASE_: Dict =[]
for i in range(0 , z.shape[2] , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for j in range(0 , z.shape[3] , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
SCREAMING_SNAKE_CASE_: str =self.post_quant_conv(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.decoder(__lowerCAmelCase )
row.append(__lowerCAmelCase )
rows.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =[]
for i, row in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: str =[]
for j, tile in enumerate(__lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE_: Dict =self.blend_v(rows[i - 1][j] , __lowerCAmelCase , __lowerCAmelCase )
if j > 0:
SCREAMING_SNAKE_CASE_: int =self.blend_h(row[j - 1] , __lowerCAmelCase , __lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__lowerCAmelCase , dim=3 ) )
SCREAMING_SNAKE_CASE_: Any =torch.cat(__lowerCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any = False , lowerCAmelCase : Union[str, Any] = True , lowerCAmelCase : Union[str, Any] = None , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =sample
SCREAMING_SNAKE_CASE_: Tuple =self.encode(__lowerCAmelCase ).latent_dist
if sample_posterior:
SCREAMING_SNAKE_CASE_: Optional[int] =posterior.sample(generator=__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: int =posterior.mode()
SCREAMING_SNAKE_CASE_: List[str] =self.decode(__lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
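# Standalone sketch of the seam blending used by blend_h above: two overlapping
# tiles are linearly cross-faded over `blend_extent` columns (tensor sizes are
# illustrative, not tied to any real VAE configuration).
if __name__ == "__main__":
    import torch

    a = torch.zeros(1, 1, 4, 8)  # left tile
    b = torch.ones(1, 1, 4, 8)  # right tile
    blend_extent = 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    # the first four columns of b now ramp 0.0, 0.25, 0.5, 0.75 into the tile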
| 173 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
    def setUp( self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict( self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
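# Hedged usage sketch (not part of the tests above): preprocessing a single image with
# a directly instantiated DPTImageProcessor; the size values mirror common DPT defaults.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

image = Image.fromarray(np.zeros((480, 640, 3) , dtype=np.uint8))
processor = DPTImageProcessor(do_resize=True , size={"height": 384, "width": 384})
pixel_values = processor(image , return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected (1, 3, 384, 384)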
| 272 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because it should only be run when releasing a minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class snake_case ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env")
    def create_estimator( self , instance_count ):
        '''simple docstring'''
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script( self , instance_count ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json" , "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile)
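# Hedged sketch of the estimator the test builds; the role, image URI, and source
# directory below are placeholders, shown only to make the call shape concrete.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./examples/pytorch/text-classification",
    image_uri="<ecr-image-uri>",  # placeholder
    role="arn:aws:iam::123456789012:role/SageMakerRole",  # placeholder
    instance_count=2,
    instance_type="ml.p3.16xlarge",
    py_version="py36",
    hyperparameters={"model_name_or_path": "distilbert-base-cased"},
    distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
)
# estimator.fit() would launch the distributed training job on SageMaker.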
| 217 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1 , emb_2 , eps=1e-12 ):
    '''simple docstring'''
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1 ) , a_min=eps ) ).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T )
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
    '''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup( self):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,))
    def __call__( self , clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3)
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)
    def init_weights( self , rng , input_shape , params = None):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs , clip_input)["params"]
        return random_params
    def __call__( self , clip_input , params = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params} , jnp.array(clip_input , dtype=jnp.float32) , rngs={} , )
| 272 | 0 |
from functools import lru_cache
@lru_cache
def factorial(num: int ) -> int:
    '''simple docstring'''
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
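# Usage sketch: lru_cache memoizes earlier results, so overlapping calls reuse them.
if __name__ == "__main__":
    print(factorial(10))           # 3628800
    print(factorial.cache_info())  # hit count grows across repeated calls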
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 0 |
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 83 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline( Pipeline ):
    '''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for the `handle_long_generation` parameter;"
                    """ expected one of [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"""add_space_before_punct_symbol""": True})
        return super()._parse_and_tokenize(*args , **kwargs)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        """We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"""
                        """ model's max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
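# Hedged usage sketch for the pipeline above, via the high-level factory;
# "gpt2" is an illustrative checkpoint choice.
from transformers import pipeline

generator = pipeline("text-generation" , model="gpt2")
out = generator("Hello, I'm a language model," , max_new_tokens=20 , num_return_sequences=1)
print(out[0]["generated_text"])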
| 272 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str , a: float , precision: float = 10**-10 ) -> float:
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 348 | '''simple docstring'''
def z_function(input_str: str ) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int , z_result: list[int] , s: str ) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str ) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
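# Quick demonstration of the helpers above (values verified by hand).
if __name__ == "__main__":
    print(z_function("aaaa") )                    # [0, 3, 2, 1]
    print(find_pattern("abr" , "abracadabra") )   # 2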
| 272 | 0 |
'''simple docstring'''
from collections import namedtuple
UpperCAmelCase : str = namedtuple('from_to', 'from_ to')
UpperCAmelCase : Dict = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_0_1, 1_0_0_0),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'cubicyard': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'cubicfoot': from_to(0.0_2_8, 3_5.3_1_4_7),
'cup': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def volume_conversion(value: float , from_type: str , to_type: str ):
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
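# Example conversion using the table above: 2 litres expressed in gallons.
if __name__ == "__main__":
    print(volume_conversion(2 , "litre" , "gallon") )  # ~0.528344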
| 267 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__( self , feature_extractor , tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps)
    def __call__( self , *args , **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        audio = kwargs.pop("audio" , None)
        sampling_rate = kwargs.pop("sampling_rate" , None)
        text = kwargs.pop("text" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text , **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        audio_values = kwargs.pop("audio" , None)
        padding_mask = kwargs.pop("padding_mask" , None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
    def _decode_audio( self , audio_values , padding_mask = None):
        """simple docstring"""
        audio_values = to_numpy(audio_values)
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , "constant" , constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1)
        return audio_values
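# Hedged usage sketch: this processor pairs an EncodecFeatureExtractor with a T5
# tokenizer (the MusicGen layout); "facebook/musicgen-small" is an assumed checkpoint.
import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
inputs = processor(text=["80s pop with bassy drums"] , audio=np.zeros(32000 , dtype=np.float32) , sampling_rate=32000 , return_tensors="pt")
print(list(inputs.keys()))  # input_ids / attention_mask / input_values (padding_mask when padded)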
| 272 | 0 |
import os
import string
import sys
a__ : Dict = 1 << 8
a__ : Any = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
a__ : Any = []
a__ : Optional[Any] = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
a__ : List[str] = ord(str(i))
def get_raw_chars():
    """simple docstring"""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["esc"] )
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    """simple docstring"""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
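# Minimal interactive usage sketch for the helpers above (run in a real terminal):
if __name__ == "__main__":
    print("Press a key (arrow keys come back as flagged key codes)...")
    print(repr(get_character() ) )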
| 313 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
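# Hedged usage sketch mirroring the slow test above (same checkpoint as the test).
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet , scheduler=PNDMScheduler())
image = pipe(generator=torch.manual_seed(0) , num_inference_steps=20 , output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)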
| 272 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = torch.nn.Linear(2 , 4 )
_a = torch.optim.AdamW(model.parameters() , lr=1.0 )
_a = torch.optim.lr_scheduler.OneCycleLR(_A , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
_a = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_a = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def _lowerCamelCase ( lowercase : Any ) -> Any:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def _lowerCamelCase ( lowercase : str ) -> Any:
_a = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_A )
class AcceleratorTester (AccelerateTestCase ):
"""simple docstring"""
@require_cuda
def UpperCamelCase__ ( self : List[Any] ):
_a = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowerCAmelCase ):
_a = Accelerator(cpu=__lowerCAmelCase )
def UpperCamelCase__ ( self : int ):
_a = Accelerator()
_a = GradientState()
assert state.num_steps == 1
_a = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_a = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCamelCase__ ( self : Tuple ):
_a = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def UpperCamelCase__ ( self : Any ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__a : int , **__a : Optional[int] ):
pass
with patch("torch.cuda.set_device" , __lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_a = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def UpperCamelCase__ ( self : List[str] ):
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_a = get_signature(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1e-3 )
def UpperCamelCase__ ( self : Any ):
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_a = get_signature(__lowerCAmelCase )
# saving hook
def save_config(__a : List[str] , __a : int , __a : Optional[Any] ):
_a = {"class_name": models[0].__class__.__name__}
with open(os.path.join(__lowerCAmelCase , "data.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# loading hook
def load_config(__a : Dict , __a : str ):
with open(os.path.join(__lowerCAmelCase , "data.json" ) , "r" ) as f:
_a = json.load(__lowerCAmelCase )
_a = config["class_name"]
_a = accelerator.register_save_state_pre_hook(__lowerCAmelCase )
_a = accelerator.register_load_state_pre_hook(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = "random"
# make sure loaded weights match with hooks
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def UpperCamelCase__ ( self : Dict ):
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = None
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(dummy_obj is None )
def UpperCamelCase__ ( self : List[Any] ):
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = [1, 2, 3]
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__lowerCAmelCase , "_is_accelerate_prepared" , __lowerCAmelCase ) , __lowerCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def UpperCamelCase__ ( self : List[Any] ):
from transformers import AutoModelForCausalLM
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCAmelCase , device_map={"": 0} , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
def UpperCamelCase__ ( self : str ):
from transformers import AutoModelForCausalLM
_a = Accelerator()
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_a = infer_auto_device_map(__lowerCAmelCase )
_a = "cpu"
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=__lowerCAmelCase , load_in_abit=__lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=__lowerCAmelCase )
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
_a = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self : Optional[Any] ):
from transformers import AutoModelForCausalLM
_a = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
_a = infer_auto_device_map(__lowerCAmelCase )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
_a = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
_a = accelerator.prepare(__lowerCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self : List[Any] ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
_a = infer_auto_device_map(__lowerCAmelCase )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(__lowerCAmelCase )
@require_cuda
def UpperCamelCase__ ( self : Any ):
_a = torch.nn.Linear(10 , 10 )
_a = torch.optim.SGD(model.parameters() , lr=0.01 )
_a = Accelerator(cpu=__lowerCAmelCase )
_a = accelerator.prepare(__lowerCAmelCase )
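# Hedged sketch of the prepare/backward pattern these tests exercise end to end.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2 , 4)
optimizer = torch.optim.AdamW(model.parameters() , lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(8, 2) , torch.randn(8, 4)) , batch_size=4)
model, optimizer, loader = accelerator.prepare(model , optimizer , loader)
for x, y in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x) , y)
    accelerator.backward(loss)
    optimizer.step()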
| 63 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str ) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__snake_case = logging.getLogger(__name__)
class GLUETransformer (BaseTransformer ):
_lowercase = '''sequence-classification'''
def __init__( self: Any,A_: Any ):
'''simple docstring'''
if type(__lowerCAmelCase ) == dict:
__UpperCamelCase = Namespace(**__lowerCAmelCase )
__UpperCamelCase = glue_output_modes[hparams.task]
__UpperCamelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase,__lowerCAmelCase,self.mode )
def snake_case_ ( self: Dict,**A_: Dict ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def snake_case_ ( self: Union[str, Any],A_: Optional[int],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without facing test set submission requirements
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
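# A minimal invocation sketch (not from the source): assuming this file is saved as
# run_pl_glue.py and that add_generic_args registers the --model_name_or_path,
# --data_dir, --output_dir, --do_train and --do_predict flags used above, a run
# could look like:
#
#   python run_pl_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_train --do_predict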
import os
import string
import sys

ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Pair up the leftover command-line tokens into a flag-to-value dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
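# Illustrative: parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/hf"])
# returns {"num_proc": "4", "cache_dir": "/tmp/hf"}; flags and values pair up positionally.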
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
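# Illustrative shell usage, assuming the module above is installed as the
# `datasets-cli` entry point (subcommand names follow the register_subcommand
# calls; the exact test flags are an assumption):
#
#   datasets-cli env
#   datasets-cli test ./path/to/dataset --save_infos --all_configs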
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
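        # e.g. (illustrative, using this tester's defaults): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length = 226
        # once the [CLS] token is prepended.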
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFViTModel(config=_A )
UpperCAmelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase = model(**_A )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
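# A minimal inference sketch mirroring the integration test above (illustrative
# only; it reuses `prepare_img` and assumes the same checkpoint):
#
#   model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=prepare_img(), return_tensors="tf")
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])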
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = BlipImageProcessor()
UpperCAmelCase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase = BlipaProcessor(_A , _A )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self , **_A ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer
def _lowercase ( self , **_A ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def _lowercase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
UpperCAmelCase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipaProcessor(tokenizer=_A , image_processor=_A )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(_A , return_tensors='''np''' )
UpperCAmelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipaProcessor(tokenizer=_A , image_processor=_A )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = processor(text=_A )
UpperCAmelCase = tokenizer(_A , return_token_type_ids=_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipaProcessor(tokenizer=_A , image_processor=_A )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipaProcessor(tokenizer=_A , image_processor=_A )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.batch_decode(_A )
UpperCAmelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipaProcessor(tokenizer=_A , image_processor=_A )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=_A , images=_A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__=True ) -> List[str]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=a_ ) )
class A_ (a_ ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def _lowercase ( self , _A , _A ):
'''simple docstring'''
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase = dataset_module_factory(_A , cache_dir=_A )
UpperCAmelCase = import_main_class(dataset_module.module_path , dataset=_A )
UpperCAmelCase = builder_cls(
cache_dir=_A , config_name=_A , hash=dataset_module.hash , )
UpperCAmelCase = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_A ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
UpperCAmelCase = cached_path(_A , cache_dir=_A )
self.assertTrue(os.path.exists(_A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
UpperCAmelCase = dataset_module_factory('''wikipedia''' , cache_dir=UpperCamelCase__ )
UpperCAmelCase = import_main_class(dataset_module.module_path )
UpperCAmelCase = builder_cls(
cache_dir=UpperCamelCase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
UpperCAmelCase = None
builder_instance.download_and_prepare()
UpperCAmelCase = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = dataset_module_factory('''wikipedia''' , cache_dir=UpperCamelCase__ )
UpperCAmelCase = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
UpperCAmelCase = builder_cls(
cache_dir=UpperCamelCase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
UpperCAmelCase = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert "train" in ds
assert isinstance(ds['''train'''] , UpperCamelCase__ )
assert next(iter(ds['''train'''] ) )
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class A_ :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A_ :
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _lowercase ( self ):
'''simple docstring'''
if self.train_file is not None:
UpperCAmelCase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
UpperCAmelCase__ = 42
UpperCAmelCase__ = True
UpperCAmelCase__ = None
UpperCAmelCase__ = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
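# Shape walk-through of the collator above (illustrative numbers, not from the source):
# with batch_size=2 and num_choices=4, tokenizer.pad sees 8 flattened examples and
# returns tensors of shape (8, seq_len); the `.view(batch_size, num_choices, -1)` call
# then restores (2, 4, seq_len) so the model receives one row per multiple-choice item.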
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
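    # Illustrative (hypothetical data): one SWAG example with context "C", header "H"
    # and endings e0..e3 yields the pairs ("C", "H e0") ... ("C", "H e3"); after
    # tokenization, every 4 consecutive rows are regrouped into one example above.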
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''train''' , UpperCamelCase__ )
trainer.save_metrics('''train''' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''eval''' , UpperCamelCase__ )
trainer.save_metrics('''eval''' , UpperCamelCase__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Calculate the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
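# The quantity computed above is the standard Friedmann relation (a restatement of
# the code, not an extension):
#     H(z) = H_0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_L)
# where Omega_k = 1 - (Omega_m + Omega_r + Omega_L) is the `curvature` term.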
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
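    # Note: at redshift 0 the four density terms sum to exactly 1 by construction
    # (curvature absorbs the remainder), so this demo prints back the input value 68.3.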
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
UpperCAmelCase__ = MBartConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = inputs_dict['''head_mask''']
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFMBartModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
UpperCAmelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCAmelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def _lowercase ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__A : Optional[Any] = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__A : Optional[Any] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__A : Optional[int] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
if self.use_mc_token_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
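# The random-input helpers used throughout the tester above come from the
# transformers test utilities; a minimal sketch of what they produce
# (assumption: uniform random draws, not the verbatim implementation):
import torch

def ids_tensor_sketch(shape, vocab_size):
    # random token ids in [0, vocab_size)
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask_sketch(shape):
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, -1] = 1  # make sure at least one position per row is attended
    return mask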
| 273 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[int] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase = MaskFormerConfig(backbone_config=UpperCamelCase__ )
UpperCAmelCase = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase = 847
UpperCAmelCase = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
UpperCAmelCase = 150
UpperCAmelCase = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase = 171
UpperCAmelCase = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
UpperCAmelCase = 133
UpperCAmelCase = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase = 19
UpperCAmelCase = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase = 65
UpperCAmelCase = '''mapillary-vistas-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = dct.pop(UpperCamelCase__ )
UpperCAmelCase = val
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[:dim, :]
UpperCAmelCase = in_proj_bias[: dim]
UpperCAmelCase = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase = in_proj_weight[
-dim :, :
]
UpperCAmelCase = in_proj_bias[-dim :]
# fmt: on
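# What the slicing above does, on a toy scale (sketch): a fused qkv projection
# of shape (3*dim, dim) is split into equal query / key / value blocks.
import torch

dim = 4
fused = torch.randn(3 * dim, dim)
q, k, v = fused[:dim], fused[dim : 2 * dim], fused[-dim:]
assert q.shape == k.shape == v.shape == (dim, dim)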
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: hidden_size, :]
UpperCAmelCase = in_proj_bias[:config.hidden_size]
UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase = in_proj_weight[-hidden_size :, :]
UpperCAmelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: hidden_size, :]
UpperCAmelCase = in_proj_bias[:config.hidden_size]
UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase = in_proj_weight[-hidden_size :, :]
UpperCAmelCase = in_proj_bias[-hidden_size :]
def __SCREAMING_SNAKE_CASE ( ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = get_maskformer_config(UpperCamelCase__ )
# load original state_dict
with open(UpperCamelCase__ , '''rb''' ) as f:
UpperCAmelCase = pickle.load(UpperCamelCase__ )
UpperCAmelCase = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
# load 🤗 model
UpperCAmelCase = MaskFormerForInstanceSegmentation(UpperCamelCase__ )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase__ , param.shape )
UpperCAmelCase , UpperCAmelCase = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase__ ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase = prepare_img()
if "vistas" in model_name:
UpperCAmelCase = 65
elif "cityscapes" in model_name:
UpperCAmelCase = 6_5535
else:
UpperCAmelCase = 255
UpperCAmelCase = True if '''ade''' in model_name else False
UpperCAmelCase = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ )
UpperCAmelCase = image_processor(UpperCamelCase__ , return_tensors='''pt''' )
UpperCAmelCase = model(**UpperCamelCase__ )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A : str = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
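# Example invocation (sketch; the script name and checkpoint path are illustrative):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub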
| 273 |
import cva
import numpy as np
class A_ :
def __init__( self , _A , _A ):
'''simple docstring'''
if k in (0.04, 0.06):
UpperCAmelCase = k
UpperCAmelCase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ):
'''simple docstring'''
return str(self.k )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = cva.imread(_A , 0 )
UpperCAmelCase , UpperCAmelCase = img.shape
UpperCAmelCase = []
UpperCAmelCase = img.copy()
UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB )
UpperCAmelCase , UpperCAmelCase = np.gradient(_A )
UpperCAmelCase = dx**2
UpperCAmelCase = dy**2
UpperCAmelCase = dx * dy
UpperCAmelCase = 0.04
UpperCAmelCase = self.window_size // 2
for y in range(_A , h - offset ):
for x in range(_A , w - offset ):
UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = (wxx * wyy) - (wxy**2)
UpperCAmelCase = wxx + wyy
UpperCAmelCase = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
__A : Tuple = HarrisCorner(0.04, 3)
__A , __A : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
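# A vectorized sketch of the same Harris response map, replacing the per-pixel
# Python loops with array ops. Assumption: scipy is available; uniform_filter
# times size**2 reproduces the window sums used above (border handling differs
# slightly from the offset-skipping loop).
from scipy.ndimage import uniform_filter

def harris_response(gray, k=0.04, window_size=3):
    dy, dx = np.gradient(gray.astype(float))
    wxx = uniform_filter(dx * dx, size=window_size) * window_size**2
    wyy = uniform_filter(dy * dy, size=window_size) * window_size**2
    wxy = uniform_filter(dx * dy, size=window_size) * window_size**2
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2  # large positive values mark corners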
| 273 | 1 |
__A : Tuple = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
UpperCAmelCase = [False] * len(UpperCamelCase__ )
UpperCAmelCase = [s]
UpperCAmelCase = True
while queue:
UpperCAmelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCamelCase__ )
UpperCAmelCase = True
UpperCAmelCase = u
return visited[t]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = [-1] * (len(UpperCamelCase__ ))
UpperCAmelCase = 0
UpperCAmelCase = []
UpperCAmelCase = [i[:] for i in graph] # Record original cut, copy.
while bfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = float('''Inf''' )
UpperCAmelCase = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase = min(UpperCamelCase__ , graph[parent[s]][s] )
UpperCAmelCase = parent[s]
max_flow += path_flow
UpperCAmelCase = sink
while v != source:
UpperCAmelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase = parent[v]
for i in range(len(UpperCamelCase__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
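# Note: the adjacency matrix above is the classic CLRS flow network, whose
# maximum s->t flow is 23. mincut() mutates `graph` in place and reports the
# edges (i, j) whose residual capacity fell to zero, i.e. the edges saturated
# by the final flow.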
| 273 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
'''simple docstring'''
UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(UpperCamelCase__ ).content
if __name__ == "__main__":
__A : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
__A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
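# A slightly more defensive variant (sketch): validate each HTTP response
# before writing, so a failed lookup doesn't produce an empty or corrupt file.
import requests

def download_video_checked(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    meta = requests.get(base_url + url, timeout=30)
    meta.raise_for_status()  # fail loudly on HTTP errors
    src = meta.json()[0]["urls"][0]["src"]
    video = requests.get(src, timeout=30)
    video.raise_for_status()
    return video.content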
| 273 | 1 |
from ...processing_utils import ProcessorMixin
class A_ (a_ ):
UpperCAmelCase__ = ['''image_processor''', '''feature_extractor''']
UpperCAmelCase__ = '''TvltImageProcessor'''
UpperCAmelCase__ = '''TvltFeatureExtractor'''
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__(image_processor=_A , feature_extractor=_A )
UpperCAmelCase = image_processor
UpperCAmelCase = feature_extractor
def __call__( self , _A=None , _A=None , _A=None , _A=None , _A=False , _A=False , *_A , **_A , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
UpperCAmelCase = None
if images is not None:
UpperCAmelCase = self.image_processor(_A , mask_pixel=_A , *_A , **_A )
if images_mixed is not None:
UpperCAmelCase = self.image_processor(_A , is_mixed=_A , *_A , **_A )
if audio is not None:
UpperCAmelCase = self.feature_extractor(
_A , *_A , sampling_rate=_A , mask_audio=_A , **_A )
UpperCAmelCase = {}
if audio is not None:
output_dict.update(_A )
if images is not None:
output_dict.update(_A )
if images_mixed_dict is not None:
output_dict.update(_A )
return output_dict
@property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.image_processor.model_input_names
UpperCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
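# Hypothetical usage (sketch; the checkpoint id, input names, and sampling
# rate are assumptions, not taken from the code above):
#   from transformers import TvltProcessor
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform,
#                      sampling_rate=44100, return_tensors="pt")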
| 273 |
from __future__ import annotations
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float:
'''simple docstring'''
UpperCAmelCase = x_start
UpperCAmelCase = fnc(UpperCamelCase__ )
UpperCAmelCase = 0.0
for _ in range(UpperCamelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase = (x_end - x_start) / steps + xa
UpperCAmelCase = fnc(UpperCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase = xa
UpperCAmelCase = fxa
return area
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
__A : List[Any] = 10
while i <= 100_000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
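# De-obfuscated sketch of the routine above: the dump collapsed the distinct
# step variables (x1/x2, fx1/fx2) into a single name, which makes the printed
# code compute a zero-width trapezoid; spelled out, the method is:
def trapezoidal_area_sketch(fnc, x_start: float, x_end: float, steps: int = 100) -> float:
    x1, fx1, area = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        x2 = x1 + (x_end - x_start) / steps
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2  # trapezoid: (b1 + b2) * h / 2
        x1, fx1 = x2, fx2
    return area

# usage: trapezoidal_area_sketch(lambda x: x**3 + x**2, -5, 5, 1000)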
| 273 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A : Union[str, Any] = ["text", "image", "audio"]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
inputs.append(create_inputs(UpperCamelCase__ ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = []
for output in outputs:
if isinstance(UpperCamelCase__ , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(UpperCamelCase__ , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(UpperCamelCase__ , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class A_ :
def _lowercase ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
UpperCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input , _A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*_A )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase = [outputs]
self.assertListEqual(output_types(_A ) , self.tool.outputs )
def _lowercase ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*_A )
if not isinstance(_A , _A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
for output, output_type in zip(_A , self.tool.outputs ):
UpperCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_A , _A ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = []
for _input, input_type in zip(_A , self.tool.inputs ):
if isinstance(_A , _A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase = self.tool(*_A )
if not isinstance(_A , _A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(_A ) , len(self.tool.outputs ) )
| 273 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Dict = logging.get_logger(__name__)
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Tuple = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = SqueezeBertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**_A )
UpperCAmelCase = do_lower_case
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
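# Layout produced by the two helpers above (standard BERT-style scheme):
#   single sequence: [CLS] A [SEP]          -> token_type_ids: all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"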
| 273 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=UpperCamelCase__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=UpperCamelCase__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=UpperCamelCase__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
UpperCAmelCase = parse_args()
# Import training_script as a module.
UpperCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase = script_fpath.stem
UpperCAmelCase = importlib.import_module(UpperCamelCase__ )
# Patch sys.argv
UpperCAmelCase = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
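# Example invocation (sketch; the training script and its arguments are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# xmp.spawn then runs the script's _mp_fn once per TPU core.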
| 273 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A : int = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = list(s_dict.keys() )
for key in keys:
UpperCAmelCase = R'''.*/layers_(\d+)'''
UpperCAmelCase = key
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ )
UpperCAmelCase = R'''(encoder|decoder)\/'''
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
if groups[0] == "encoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ )
elif groups[0] == "decoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , UpperCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase = s_dict[key].shape[0]
UpperCAmelCase = s_dict[key]
for idx in range(UpperCamelCase__ ):
UpperCAmelCase = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCamelCase__ )
return s_dict
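# Two artifacts of the dump worth flagging here: the literal "nested fstring"
# above stands in for an elided nested f-string (a plausible reconstruction,
# not verbatim: s_dict[key.replace("expert/", f"experts.expert_{idx}/")] =
# expert_weihts[idx]), and `from tax import checkpoints` at the top is the
# dump's rendering of the `t5x` package name. Quick check of the layer
# renaming this function applies:
import re
assert re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_0/mlp/wi/kernel") == "encoder/block/0/layer/mlp/wi/kernel"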
__A : Optional[int] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ )
UpperCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ )
UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
UpperCAmelCase = str(activation[1] )
UpperCAmelCase = num_experts
UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ )
return config
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]:
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
if gin_file is not None:
UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
else:
UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
UpperCAmelCase = flax_params['''target''']
UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
UpperCAmelCase = rename_keys(UpperCamelCase__ )
UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__A : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(UpperCamelCase__ ):
print(F"""{i}\t\t{d}""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
for j in range(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> list[float]:
'''simple docstring'''
UpperCAmelCase = [float('''inf''' )] * vertex_count
UpperCAmelCase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
UpperCAmelCase = distance[u] + w
UpperCAmelCase = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = int(input("Enter number of vertices: ").strip())
__A : Optional[int] = int(input("Enter number of edges: ").strip())
__A : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__A , __A , __A : str = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__A : List[Any] = {"src": src, "dst": dest, "weight": weight}
__A : int = int(input("\nEnter shortest path source:").strip())
__A : Union[str, Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
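# Minimal self-check (sketch): a 3-vertex graph where going through vertex 1
# (cost 1 + 2) beats the direct 0 -> 2 edge (cost 10).
example = [
    {"src": 0, "dst": 1, "weight": 1},
    {"src": 1, "dst": 2, "weight": 2},
    {"src": 0, "dst": 2, "weight": 10},
]
assert bellman_ford(example, 3, 3, 0) == [0.0, 1.0, 3.0]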
| 273 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
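# Pattern shared by both tests above (sketch): save the pipeline, reload it,
# re-run with the same seeded generator, then bound the worst-case deviation:
#   max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
#   assert max_diff < 1e-4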
| 273 | 1 |
import argparse
import os
import re
import packaging.version
__A : Optional[Any] = "examples/"
__A : List[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__A : Dict = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
__A : List[str] = "README.md"
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=False ) -> Any:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__=False ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__A : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
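# Quick check of the "init" pattern above (sketch):
#   REPLACE_PATTERNS["init"][0].search('__version__ = "0.19.0.dev0"').groups()[0]
#   -> "0.19.0.dev0"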
| 273 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
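# Example usage (sketch; assuming the de-obfuscated name `electric_power`
# for the function above; exactly one argument must be 0):
#   electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=0, power=4)  -> result(name='current', value=2.0)
#   electric_power(voltage=2, current=3, power=0)  -> result(name='power', value=6.0)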
| 273 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : int = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class A_ (a_ ):
UpperCAmelCase__ = '''bridgetower_vision_model'''
def __init__( self , _A=7_6_8 , _A=1_2 , _A=3 , _A=1_6 , _A=2_8_8 , _A=1 , _A=1E-05 , _A=False , _A=True , _A=False , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = image_size
UpperCAmelCase = initializer_factor
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = stop_gradient
UpperCAmelCase = share_layernorm
UpperCAmelCase = remove_last_layer
@classmethod
def _lowercase ( cls , _A , **_A ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(_A , **_A )
if config_dict.get('''model_type''' ) == "bridgetower":
UpperCAmelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class A_ (a_ ):
UpperCAmelCase__ = '''bridgetower_text_model'''
def __init__( self , _A=5_0_2_6_5 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=1 , _A=3_0_7_2 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_4 , _A=1 , _A=1E-05 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_factor
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
@classmethod
def _lowercase ( cls , _A , **_A ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(_A , **_A )
if config_dict.get('''model_type''' ) == "bridgetower":
UpperCAmelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class A_ (a_ ):
UpperCAmelCase__ = '''bridgetower'''
def __init__( self , _A=True , _A="gelu" , _A=7_6_8 , _A=1 , _A=1E-05 , _A=False , _A="add" , _A=1_2 , _A=6 , _A=False , _A=False , _A=None , _A=None , **_A , ):
'''simple docstring'''
UpperCAmelCase = kwargs.pop('''text_config_dict''' , _A )
UpperCAmelCase = kwargs.pop('''vision_config_dict''' , _A )
super().__init__(**_A )
UpperCAmelCase = share_cross_modal_transformer_layers
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_size
UpperCAmelCase = initializer_factor
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = share_link_tower_layers
UpperCAmelCase = link_tower_type
UpperCAmelCase = num_attention_heads
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = tie_word_embeddings
UpperCAmelCase = init_layernorm_from_vision_encoder
if text_config is None:
UpperCAmelCase = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
UpperCAmelCase = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
UpperCAmelCase = BridgeTowerTextConfig(**_A )
UpperCAmelCase = BridgeTowerVisionConfig(**_A )
@classmethod
def _lowercase ( cls , _A , _A , **_A ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.text_config.to_dict()
UpperCAmelCase = self.vision_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
| 273 |
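A toy sketch of the composite-config pattern above, with hypothetical class names: the top-level config owns a text and a vision sub-config and nests them as dicts when serializing.

class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return dict(self.__dict__)

class ComposedConfig:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        out = dict(self.__dict__)
        out["text_config"] = self.text_config.to_dict()
        out["vision_config"] = self.vision_config.to_dict()
        return out

cfg = ComposedConfig(text_config={"hidden_size": 1024})
assert cfg.to_dict()["text_config"]["hidden_size"] == 1024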
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 1 |
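A toy version of the lazy-import machinery the module above relies on (not transformers' actual _LazyModule): attribute access triggers the import instead of module load time.

import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module: [attrs]} into {attr: module} for lookup
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = ToyLazyModule("lazy_demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"a": 1}), lazy.sqrt(9))  # json and math are imported on first use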
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[int] = {"tokenizer_file": "tokenizer.json"}
__A : Union[str, Any] = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = None
def __init__( self , _A=None , _A=None , _A=None , _A="<unk>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A=False , _A=False , **_A , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , add_prefix_space=_A , clean_up_tokenization_spaces=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**_A )
UpperCAmelCase = add_prefix_space
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 273 |
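A hedged sketch of the conversation flattening done in the last method above: each turn is encoded, an EOS id is appended, and the sequence is left-truncated to the model maximum. toy_encode is a stand-in for a real tokenizer.

def build_conversation_ids(turns, encode, eos_id, max_len):
    ids = []
    for text in turns:
        ids.extend(encode(text) + [eos_id])
    return ids[-max_len:] if len(ids) > max_len else ids

def toy_encode(text):
    return [ord(c) % 100 for c in text]

print(build_conversation_ids(["hi", "there"], toy_encode, eos_id=0, max_len=6))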
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 1 |
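A hedged sketch of the key-renaming approach used in the conversion above; the substitution table here is illustrative, not YOSO's full mapping.

RENAMES = [
    ("norm1", "attention.output.LayerNorm"),
    ("norm2", "output.LayerNorm"),
    ("W_q", "self.query"),
    ("W_k", "self.key"),
    ("W_v", "self.value"),
]

def rename_key(key: str) -> str:
    for old, new in RENAMES:  # order matters: norm1/norm2 before any generic rules
        key = key.replace(old, new)
    return key

assert rename_key("layer.0.mha.W_q.weight") == "layer.0.mha.self.query.weight"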
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Tuple = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
UpperCAmelCase__ = '''big_bird'''
def __init__( self , _A=5_0_3_5_8 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=4_0_9_6 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=0 , _A=1 , _A=2 , _A=6_6 , _A="block_sparse" , _A=True , _A=False , _A=6_4 , _A=3 , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = use_cache
UpperCAmelCase = rescale_embeddings
UpperCAmelCase = attention_type
UpperCAmelCase = use_bias
UpperCAmelCase = block_size
UpperCAmelCase = num_random_blocks
UpperCAmelCase = classifier_dropout
class A_ (a_ ):
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 273 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 1 |
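Cross-check sketch: Python's built-in three-argument pow performs the same modular exponentiation, so the hyperexponentiation loop above can be written directly with it; last_digits_of_tetration is a hypothetical name.

def last_digits_of_tetration(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)  # built-in modular exponentiation
    return result

# 3 tetrated to height 3 is 3 ** (3 ** 3) = 3 ** 27
assert last_digits_of_tetration(3, 3, 8) == pow(3, 27, 10**8)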
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
UpperCAmelCase = 6
UpperCAmelCase = 1
UpperCAmelCase = 1901
UpperCAmelCase = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
UpperCAmelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
UpperCAmelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
UpperCAmelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
UpperCAmelCase = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 273 |
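An independent cross-check of the calendar walk above using the standard library: count month-firsts that were Sundays during 1901-2000 (Project Euler 19).

import datetime

sundays = sum(
    datetime.date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
print(sundays)  # 171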
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
__A : str = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class A_ (a_ ):
UpperCAmelCase__ = '''longformer'''
def __init__( self , _A = 5_1_2 , _A = 2 , _A = 1 , _A = 0 , _A = 2 , _A = 3_0_5_2_2 , _A = 7_6_8 , _A = 1_2 , _A = 1_2 , _A = 3_0_7_2 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 5_1_2 , _A = 2 , _A = 0.02 , _A = 1E-12 , _A = False , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase = attention_window
UpperCAmelCase = sep_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = onnx_export
class A_ (a_ ):
def __init__( self , _A , _A = "default" , _A = None ):
'''simple docstring'''
super().__init__(_A , _A , _A )
UpperCAmelCase = True
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = super().outputs
if self.task == "default":
UpperCAmelCase = {0: '''batch'''}
return outputs
@property
def _lowercase ( self ):
'''simple docstring'''
return 1E-4
@property
def _lowercase ( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
def _lowercase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
'''simple docstring'''
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
UpperCAmelCase = 1
return inputs
| 273 | 1 |
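A small numpy sketch of the "make every second token global" trick in the dummy-input method above; the function name is illustrative.

import numpy as np

def global_attention_mask(batch_size: int, seq_len: int) -> np.ndarray:
    mask = np.zeros((batch_size, seq_len), dtype=np.int64)
    mask[:, ::2] = 1  # every second token attends globally
    return mask

print(global_attention_mask(1, 8))  # [[1 0 1 0 1 0 1 0]]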
from scipy.stats import spearmanr
import datasets
__A : Dict = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
__A : Union[str, Any] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
__A : str = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
def _lowercase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def _lowercase ( self , _A , _A , _A=False ):
'''simple docstring'''
UpperCAmelCase = spearmanr(_A , _A )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 273 |
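Direct scipy usage matching the docstring examples embedded above:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7
print(round(pvalue, 2))  # 0.19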
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self , _A = 1 , _A = 5_0 , _A = None , _A = "pil" , _A = True , **_A , ):
'''simple docstring'''
UpperCAmelCase = self.unet.config.sample_size
UpperCAmelCase = (batch_size, 3, img_size, img_size)
UpperCAmelCase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase = randn_tensor(_A , generator=_A , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase = self.scheduler.schedule[t]
UpperCAmelCase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase , UpperCAmelCase = self.scheduler.add_noise_to_input(_A , _A , generator=_A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase = self.scheduler.step(_A , _A , _A , _A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase = self.scheduler.step_correct(
_A , _A , _A , _A , step_output.prev_sample , step_output['''derivative'''] , )
UpperCAmelCase = step_output.prev_sample
UpperCAmelCase = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 273 | 1 |
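A hedged sketch of the Euler step plus second-order correction used by the sampler above, stated for a generic ODE dx/dt = f(t, x) rather than the diffusion ODE.

def heun_step(f, t, t_next, x):
    d = f(t, x)                     # derivative at the current point
    x_euler = x + (t_next - t) * d  # plain Euler proposal
    d_next = f(t_next, x_euler)     # derivative re-evaluated at the proposal
    return x + (t_next - t) * 0.5 * (d + d_next)  # average the two slopes

# integrating dx/dt = x from x(0) = 1 with a single step of 0.1:
print(heun_step(lambda t, x: x, 0.0, 0.1, 1.0))  # 1.105, close to exp(0.1) ~ 1.10517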
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
UpperCAmelCase = model.state_dict()
def to_tf_var_name(UpperCamelCase__ ):
for patt, repl in iter(UpperCamelCase__ ):
UpperCAmelCase = name.replace(UpperCamelCase__ , UpperCamelCase__ )
return F"""bert/{name}"""
def create_tf_var(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase = tf.get_variable(dtype=UpperCamelCase__ , shape=tensor.shape , name=UpperCamelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(UpperCamelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase = to_tf_var_name(UpperCamelCase__ )
UpperCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase = torch_tensor.T
UpperCAmelCase = create_tf_var(tensor=UpperCamelCase__ , name=UpperCamelCase__ , session=UpperCamelCase__ )
tf.keras.backend.set_value(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = session.run(UpperCamelCase__ )
print(F"""Successfully created {tf_name}: {np.allclose(UpperCamelCase__ , UpperCamelCase__ )}""" )
UpperCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase = parser.parse_args(UpperCamelCase__ )
UpperCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=UpperCamelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 273 |
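A hedged sketch of two of the conversion rules above (a simplified subset, not the full mapping): rewrite parameter names into TF variable paths and transpose dense kernels.

import numpy as np

TRANSPOSE_IF = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

def to_tf(name: str, tensor: np.ndarray):
    tf_name = "bert/" + name.replace("LayerNorm.weight", "LayerNorm.gamma").replace(".", "/")
    if any(marker in name for marker in TRANSPOSE_IF):
        tensor = tensor.T  # TF stores kernels transposed relative to torch.nn.Linear weights
    return tf_name, tensor

name, t = to_tf("encoder.layer.0.output.dense.weight", np.zeros((768, 3072)))
print(name, t.shape)  # bert/encoder/layer/0/output/dense/weight (3072, 768)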
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__A : str = random.Random()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
if rng is None:
UpperCAmelCase = global_rng
UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A_ (unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = min_seq_length
UpperCAmelCase = max_seq_length
UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase = feature_size
UpperCAmelCase = padding_value
UpperCAmelCase = sampling_rate
UpperCAmelCase = do_normalize
UpperCAmelCase = num_mel_bins
UpperCAmelCase = hop_length
UpperCAmelCase = win_length
UpperCAmelCase = win_function
UpperCAmelCase = fmin
UpperCAmelCase = fmax
UpperCAmelCase = mel_floor
UpperCAmelCase = return_attention_mask
def _lowercase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
if equal_length:
UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[1][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(_A )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 1 |
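A numpy restatement of the zero-mean/unit-variance property asserted throughout the tests above:

import numpy as np

x = np.random.rand(400).astype(np.float32)
normalized = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3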
import gc
import threading
import time
import psutil
import torch
class A_ :
def __init__( self ):
'''simple docstring'''
UpperCAmelCase = psutil.Process()
UpperCAmelCase = False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = -1
while True:
UpperCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = True
UpperCAmelCase = threading.Thread(target=self.peak_monitor )
UpperCAmelCase = True
self.thread.start()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
__A : str = PeakCPUMemory()
def __SCREAMING_SNAKE_CASE ( ) -> Any:
'''simple docstring'''
UpperCAmelCase = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = torch.cuda.memory_allocated(UpperCamelCase__ )
torch.cuda.reset_peak_memory_stats()
return measures
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
UpperCAmelCase = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase = (torch.cuda.memory_allocated(UpperCamelCase__ ) - start_measures[str(UpperCamelCase__ )]) / 2**20
UpperCAmelCase = (torch.cuda.max_memory_allocated(UpperCamelCase__ ) - start_measures[str(UpperCamelCase__ )]) / 2**20
return measures
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(UpperCamelCase__ )]:.2f}MiB""" )
UpperCAmelCase = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 273 |
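A minimal usage sketch of the RSS sampling above, without the monitor thread; the exact delta printed depends on the allocator and OS.

import psutil

process = psutil.Process()
before = process.memory_info().rss
payload = bytearray(50 * 2**20)  # allocate roughly 50 MiB
after = process.memory_info().rss
print(f"delta: {(after - before) / 2**20:.1f} MiB")
del payload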
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__A : Union[str, Any] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase = args.pruning_method
UpperCAmelCase = args.threshold
UpperCAmelCase = args.model_name_or_path.rstrip('''/''' )
UpperCAmelCase = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
UpperCAmelCase = torch.load(os.path.join(UpperCamelCase__ , '''pytorch_model.bin''' ) )
UpperCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
UpperCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
UpperCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
UpperCAmelCase = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
UpperCAmelCase = MagnitudeBinarizer.apply(inputs=UpperCamelCase__ , threshold=UpperCamelCase__ )
UpperCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
UpperCAmelCase = name[:-6]
UpperCAmelCase = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase = TopKBinarizer.apply(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
UpperCAmelCase = name[:-6]
UpperCAmelCase = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase = ThresholdBinarizer.apply(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
UpperCAmelCase = name[:-6]
UpperCAmelCase = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase , UpperCAmelCase = -0.1, 1.1
UpperCAmelCase = torch.sigmoid(UpperCamelCase__ )
UpperCAmelCase = s * (r - l) + l
UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
UpperCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
UpperCAmelCase = os.path.join(
os.path.dirname(UpperCamelCase__ ) , F"""bertarized_{os.path.basename(UpperCamelCase__ )}""" )
if not os.path.isdir(UpperCamelCase__ ):
shutil.copytree(UpperCamelCase__ , UpperCamelCase__ )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
__A : int = parser.parse_args()
main(args)
| 273 |
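A hedged sketch in the spirit of the magnitude branch above: keep a fraction of weights by absolute value and zero the rest (the script's --threshold is a remaining-weights percentage for this method). This is not the repo's MagnitudeBinarizer implementation.

import torch

def magnitude_prune(tensor: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    k = max(1, int(keep_fraction * tensor.numel()))
    # value of the k-th largest magnitude: weights below this cutoff are zeroed
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    mask = (tensor.abs() >= cutoff).to(tensor.dtype)
    return tensor * mask

w = torch.tensor([[0.01, -0.5], [0.3, -0.02]])
print(magnitude_prune(w, 0.5))  # keeps -0.5 and 0.3, zeros the two smallest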
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(UpperCamelCase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 273 | 1 |
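Quick check of the closed form n * (2n - 1) used above; with the range starting at 0 the first element is 0, followed by the hexagonal numbers proper.

assert [n * (2 * n - 1) for n in range(5)] == [0, 1, 6, 15, 28]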
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : List[Any] = logging.get_logger(__name__)
__A : str = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
__A : Optional[int] = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
__A : List[str] = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
UpperCAmelCase = collections.OrderedDict()
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(UpperCamelCase__ ):
UpperCAmelCase = b
UpperCAmelCase = idx
for wd in b:
UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''')
        self.content_repatter2 = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''')
        self.content_repatter3 = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''')
        self.content_repatter4 = re.compile(
            r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter5 = re.compile(
            r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter6 = re.compile(
            r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''')
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
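    # Byte-length intuition behind the two helpers in tokenize() above (illustrative values):
    # "±" (U+00B1) encodes to the two UTF-8 bytes 0xC2 0xB1, so c == 0xC2B1 falls in the first
    # check_simbol range and the character becomes <KIGOU>; "⁂" (U+2042) encodes to the three
    # bytes 0xE2 0x81 0x82, so c == 0xE28182 falls in the checku2e range and becomes <U2000U2BFF>.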
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
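# Hedged end-to-end sketch (the model id comes from the pretrained map above; assumes
# network access and that the canonical tokenizer method names are in place):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   text = tokenizer.decode(ids)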
| 273 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def _lowercase ( self , _A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _lowercase ( self , _A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
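# This suite only runs during manual release testing, gated by the env var checked in the
# skipif above; something like the following enables it (the exact pytest target is an
# assumption, not stated in this file):
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker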
| 273 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
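# `get_duration` is imported from the local utils module; a minimal stand-in consistent with
# how it is used above (an assumption about its implementation, not taken from this file):
#
#   import functools, timeit
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           starttime = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - starttime  # wall-clock seconds
#       return wrapper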
| 273 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Tuple = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = '''big_bird'''
    def __init__(
        self,
        vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
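# Hedged usage sketch: the defaults above mirror google/bigbird-roberta-base, so
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
# builds an equivalent configuration; switching attention_type to "original_full" recovers
# dense attention for short sequences.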
| 273 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
'''simple docstring'''
        config = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
        config.update(**kwargs)
return config
def _lowercase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_A )
def _lowercase ( self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_A )
def _lowercase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def _lowercase ( self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=_A )
def _lowercase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_A )
def _lowercase ( self ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_A , prev_timestep=_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(variance_type='''fixed_small_log''' )
UpperCAmelCase = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(variance_type='''learned_range''' )
UpperCAmelCase = scheduler_class(**_A )
UpperCAmelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_A ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=_A ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=_A ) - -0.0_01_00_11 < 1E-5
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**_A )
UpperCAmelCase = scheduler.timesteps
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
UpperCAmelCase = model(_A , _A )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(_A ) )
UpperCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**_A )
scheduler.set_timesteps(2_5 )
UpperCAmelCase = scheduler.timesteps
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
UpperCAmelCase = model(_A , _A )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase = None
else:
UpperCAmelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(
_A , _A , _A , prev_timestep=_A , generator=_A ).prev_sample
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(_A ) )
UpperCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
pass
| 273 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFViTModel(config=_A )
UpperCAmelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase ):
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase = model(**_A )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
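# Note on the resized-image checks above: with interpolate_pos_encoding=True, ViT resizes its
# learned position embeddings to the new patch grid, so a halved input still works and the
# sequence length becomes (image_size // patch_size) ** 2 + 1 for the new image size.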
| 273 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self , _A = 1 , _A = 5_0 , _A = None , _A = "pil" , _A = True , **_A , ):
'''simple docstring'''
UpperCAmelCase = self.unet.config.sample_size
UpperCAmelCase = (batch_size, 3, img_size, img_size)
UpperCAmelCase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase = randn_tensor(_A , generator=_A , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase = self.scheduler.schedule[t]
UpperCAmelCase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase , UpperCAmelCase = self.scheduler.add_noise_to_input(_A , _A , generator=_A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase = self.scheduler.step(_A , _A , _A , _A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase = self.scheduler.step_correct(
_A , _A , _A , _A , step_output.prev_sample , step_output['''derivative'''] , )
UpperCAmelCase = step_output.prev_sample
UpperCAmelCase = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
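# Hedged usage sketch (the checkpoint id is hypothetical; any unconditional UNet plus a
# KarrasVeScheduler works with the pipeline above):
#   pipe = KarrasVePipeline(unet=UNetaDModel.from_pretrained("some/karras-ve-unet"),
#                           scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]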
| 273 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase ):
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
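# The hard-coded input_ids above encode the sentence in the comment; assuming network access,
# they could be reproduced with:
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tok("The dog is cute and lives in the garden house")["input_ids"]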
| 273 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all prime numbers up to and including `num` (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
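# Quick check: prime_sieve(25) should return [2, 3, 5, 7, 11, 13, 17, 19, 23]; the sieve
# runs in O(n log log n) time and O(n) space.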
| 273 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
        return batch
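# Shape intuition for the collator above: with batch size B and 4 answer choices, features
# arrive as B items of 4 encoded sequences each, are flattened to (B * 4, L) for
# tokenizer.pad, then reshaped back to (B, 4, L); e.g. batch["input_ids"].shape == (8, 4, 128)
# for B=8 and max_length=128 (illustrative numbers).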
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
        max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model() # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
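# Typical invocation (hyperparameters are illustrative, not prescribed by the script):
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --output_dir /tmp/swag_out \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5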
| 273 | 1 |
| 273 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
UpperCAmelCase__ = MBartConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = inputs_dict['''head_mask''']
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFMBartModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
UpperCAmelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCAmelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def _lowercase ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 273 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__A : List[Any] = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
if self.use_mc_token_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# free as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# free as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
UpperCAmelCase = len(UpperCamelCase__ )
UpperCAmelCase = max(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
UpperCAmelCase = coll_max + 1 - coll_min
UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i are in the collection
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
UpperCAmelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort), walking from end to beginning and updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__A : str = input("Enter numbers separated by a comma:\n").strip()
__A : Tuple = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
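# Worked example of the passes above (hypothetical input, for illustration):
# collection = [3, 1, 2, 1] -> coll_min = 1, counting_arr = [2, 1, 1]
# after the prefix sum: counting_arr = [2, 3, 4]; the reverse walk then places
# each element at index counting_arr[value - coll_min] - 1, yielding [1, 1, 2, 3].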
| 273 |
import cva
import numpy as np
class A_ :
def __init__( self , _A , _A ):
'''simple docstring'''
if k in (0.04, 0.06):
UpperCAmelCase = k
UpperCAmelCase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ):
'''simple docstring'''
return str(self.k )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = cva.imread(_A , 0 )
UpperCAmelCase , UpperCAmelCase = img.shape
UpperCAmelCase = []
UpperCAmelCase = img.copy()
UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB )
UpperCAmelCase , UpperCAmelCase = np.gradient(_A )
UpperCAmelCase = dx**2
UpperCAmelCase = dy**2
UpperCAmelCase = dx * dy
UpperCAmelCase = 0.04
UpperCAmelCase = self.window_size // 2
for y in range(_A , h - offset ):
for x in range(_A , w - offset ):
UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = (wxx * wyy) - (wxy**2)
UpperCAmelCase = wxx + wyy
UpperCAmelCase = det - k * (trace**2)
# Corner response threshold; the 0.5 cutoff below can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
__A : Tuple = HarrisCorner(0.04, 3)
__A , __A : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
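# Note on the loop above: the per-window response is the standard Harris measure
# R = det(M) - k * trace(M)**2, with M = [[sum(ixx), sum(ixy)], [sum(ixy), sum(iyy)]]
# accumulated over the window; pixels whose response exceeds the 0.5 threshold are
# recorded in corner_list and highlighted in the output image.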
| 273 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : str = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class A_ (a_ ):
UpperCAmelCase__ = '''gptsan-japanese'''
UpperCAmelCase__ = [
'''past_key_values''',
]
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _A=3_6_0_0_0 , _A=1_2_8_0 , _A=1_0_2_4 , _A=8_1_9_2 , _A=4_0_9_6 , _A=1_2_8 , _A=1_0 , _A=0 , _A=1_6 , _A=1_6 , _A=1_2_8 , _A=0.0 , _A=1E-5 , _A=False , _A=0.0 , _A="float32" , _A=False , _A=False , _A=False , _A=0.0_02 , _A=False , _A=True , _A=3_5_9_9_8 , _A=3_5_9_9_5 , _A=3_5_9_9_9 , **_A , ):
'''simple docstring'''
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = d_model
UpperCAmelCase = d_ff
UpperCAmelCase = d_ext
UpperCAmelCase = d_spout
UpperCAmelCase = num_switch_layers
UpperCAmelCase = num_ext_layers
UpperCAmelCase = num_switch_layers + num_ext_layers
UpperCAmelCase = num_heads
UpperCAmelCase = num_experts
UpperCAmelCase = expert_capacity
UpperCAmelCase = dropout_rate
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = router_bias
UpperCAmelCase = router_jitter_noise
UpperCAmelCase = router_dtype
UpperCAmelCase = router_ignore_padding_tokens
UpperCAmelCase = output_hidden_states
UpperCAmelCase = output_attentions
UpperCAmelCase = initializer_factor
UpperCAmelCase = output_router_logits
UpperCAmelCase = use_cache
super().__init__(
separator_token_id=_A , pad_token_id=_A , eos_token_id=_A , **_A , )
| 273 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
'''simple docstring'''
UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(UpperCamelCase__ ).content
if __name__ == "__main__":
__A : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
__A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 273 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A_ (a_ ):
def __get__( self , _A , _A=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase = '''__cached_''' + self.fget.__name__
UpperCAmelCase = getattr(_A , _A , _A )
if cached is None:
UpperCAmelCase = self.fget(_A )
setattr(_A , _A , _A )
return cached
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
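# For illustration: "YES" lowercases to "yes" and returns 1, "off" returns 0,
# and any value outside the two accepted sets (e.g. "maybe") raises ValueError.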
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if is_torch_fx_proxy(UpperCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase__ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
return isinstance(UpperCamelCase__ , np.ndarray )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return _is_numpy(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import torch
return isinstance(UpperCamelCase__ , torch.Tensor )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
import torch
return isinstance(UpperCamelCase__ , torch.device )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import torch
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
else:
return False
return isinstance(UpperCamelCase__ , torch.dtype )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
import tensorflow as tf
return isinstance(UpperCamelCase__ , tf.Tensor )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCamelCase__ )
return type(UpperCamelCase__ ) == tf.Tensor
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase__ , jnp.ndarray )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (dict, UserDict) ):
return {k: to_py_obj(UpperCamelCase__ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return [to_py_obj(UpperCamelCase__ ) for o in obj]
elif is_tf_tensor(UpperCamelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase__ ):
return np.asarray(UpperCamelCase__ ).tolist()
elif isinstance(UpperCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (dict, UserDict) ):
return {k: to_numpy(UpperCamelCase__ ) for k, v in obj.items()}
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return np.array(UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase__ ):
return np.asarray(UpperCamelCase__ )
else:
return obj
class A_ (a_ ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
UpperCAmelCase = getattr(self , class_fields[0].name )
UpperCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A , _A ):
UpperCAmelCase = first_field.items()
UpperCAmelCase = True
else:
try:
UpperCAmelCase = iter(_A )
UpperCAmelCase = True
except TypeError:
UpperCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A , (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0] , _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase = element[1]
elif first_field is not None:
UpperCAmelCase = first_field
else:
for field in class_fields:
UpperCAmelCase = getattr(self , field.name )
if v is not None:
UpperCAmelCase = v
def __delitem__( self , *_A , **_A ):
'''simple docstring'''
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , _A ):
'''simple docstring'''
if isinstance(_A , _A ):
UpperCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , _A , _A ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A , _A )
super().__setattr__(_A , _A )
def __setitem__( self , _A , _A ):
'''simple docstring'''
super().__setitem__(_A , _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A , _A )
def _lowercase ( self ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
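# Illustrative behavior of the container above: string keys hit the dict view
# while integer keys hit the tuple view, so outputs["logits"] and outputs[0]
# can address the same field when it comes first; fields left as None are never
# registered, so they are skipped by both keys() and to_tuple().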
class A_ (a_ , a_ ):
@classmethod
def _lowercase ( cls , _A ):
'''simple docstring'''
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class A_ (a_ ):
UpperCAmelCase__ = '''longest'''
UpperCAmelCase__ = '''max_length'''
UpperCAmelCase__ = '''do_not_pad'''
class A_ (a_ ):
UpperCAmelCase__ = '''pt'''
UpperCAmelCase__ = '''tf'''
UpperCAmelCase__ = '''np'''
UpperCAmelCase__ = '''jax'''
class A_ :
def __init__( self , _A ):
'''simple docstring'''
UpperCAmelCase = context_managers
UpperCAmelCase = ExitStack()
def __enter__( self ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__( self , *_A , **_A ):
'''simple docstring'''
self.stack.__exit__(*_A , **_A )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = infer_framework(UpperCamelCase__ )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = model_class.__name__
UpperCAmelCase = infer_framework(UpperCamelCase__ )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = "" , UpperCamelCase__ = "." ) -> Tuple:
'''simple docstring'''
def _flatten_dict(UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__="." ):
for k, v in d.items():
UpperCAmelCase = str(UpperCamelCase__ ) + delimiter + str(UpperCamelCase__ ) if parent_key else k
if v and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
yield from flatten_dict(UpperCamelCase__ , UpperCamelCase__ , delimiter=UpperCamelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
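# Minimal illustration of the helper above (hypothetical values): a nested
# mapping {"a": {"b": 1, "c": {"d": 2}}} flattens to {"a.b": 1, "a.c.d": 2},
# with the delimiter argument controlling the "." separator.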
@contextmanager
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = False ) -> Any:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
if is_numpy_array(UpperCamelCase__ ):
return np.transpose(UpperCamelCase__ , axes=UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.T if axes is None else array.permute(*UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.transpose(UpperCamelCase__ , perm=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.transpose(UpperCamelCase__ , axes=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(UpperCamelCase__ )}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if is_numpy_array(UpperCamelCase__ ):
return np.reshape(UpperCamelCase__ , UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.reshape(*UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.reshape(UpperCamelCase__ , UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.reshape(UpperCamelCase__ , UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for reshape: {type(UpperCamelCase__ )}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
if is_numpy_array(UpperCamelCase__ ):
return np.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.squeeze(UpperCamelCase__ , axis=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(UpperCamelCase__ )}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if is_numpy_array(UpperCamelCase__ ):
return np.expand_dims(UpperCamelCase__ , UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.unsqueeze(dim=UpperCamelCase__ )
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase__ , axis=UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return jnp.expand_dims(UpperCamelCase__ , axis=UpperCamelCase__ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase__ )}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
if is_numpy_array(UpperCamelCase__ ):
return np.size(UpperCamelCase__ )
elif is_torch_tensor(UpperCamelCase__ ):
return array.numel()
elif is_tf_tensor(UpperCamelCase__ ):
import tensorflow as tf
return tf.size(UpperCamelCase__ )
elif is_jax_tensor(UpperCamelCase__ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase__ )}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(UpperCamelCase__ , (tuple, list) ):
UpperCAmelCase = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase = F"""{repo_id}--{value}"""
return auto_map
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
for base_class in inspect.getmro(UpperCamelCase__ ):
UpperCAmelCase = base_class.__module__
UpperCAmelCase = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
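# Note on the dispatch pattern above (illustrative): each helper probes the
# tensor type in a fixed order (numpy, torch, tf, jax) and forwards to the
# backend-native op, so e.g. a (2, 3) numpy array run through the transpose
# helper comes back with shape (3, 2), while a torch tensor takes the
# array.T / permute branch instead.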
| 273 |
from __future__ import annotations
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float:
'''simple docstring'''
UpperCAmelCase = x_start
UpperCAmelCase = fnc(UpperCamelCase__ )
UpperCAmelCase = 0.0
for _ in range(UpperCamelCase__ ):
# Approximate each small segment of the curve as linear and solve
# for the trapezoidal area
UpperCAmelCase = (x_end - x_start) / steps + xa
UpperCAmelCase = fnc(UpperCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase = xa
UpperCAmelCase = fxa
return area
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
__A : List[Any] = 10
while i <= 100_000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
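# Numeric sanity check of the rule above (hypothetical values): integrating
# f(x) = x**2 over [0, 1] with 4 steps sums four trapezoids of width 0.25:
# (0 + 0.0625)/2 + (0.0625 + 0.25)/2 + (0.25 + 0.5625)/2 + (0.5625 + 1)/2,
# all times 0.25, giving 0.34375 against the exact value 1/3.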
| 273 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A_ (a_ ):
def __init__( self , _A , _A=None , _A=None , _A=0 ):
'''simple docstring'''
UpperCAmelCase = 1.0 if scale is None else scale
UpperCAmelCase = 0.0 if loc is None else loc
super().__init__(_A , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_A )] )
@property
def _lowercase ( self ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase ( self ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def _lowercase ( self ):
'''simple docstring'''
return self.variance.sqrt()
class A_ (nn.Module ):
def __init__( self , _A , _A , _A , **_A ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = args_dim
UpperCAmelCase = nn.ModuleList([nn.Linear(_A , _A ) for dim in args_dim.values()] )
UpperCAmelCase = domain_map
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = [proj(_A ) for proj in self.proj]
return self.domain_map(*_A )
class A_ (nn.Module ):
def __init__( self , _A ):
'''simple docstring'''
super().__init__()
UpperCAmelCase = function
def _lowercase ( self , _A , *_A ):
'''simple docstring'''
return self.function(_A , *_A )
class A_ :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__( self , _A = 1 ):
'''simple docstring'''
UpperCAmelCase = dim
UpperCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase ( self , _A ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*_A )
else:
return Independent(self.distribution_class(*_A ) , 1 )
def _lowercase ( self , _A , _A = None , _A = None , ):
'''simple docstring'''
UpperCAmelCase = self._base_distribution(_A )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_A , loc=_A , scale=_A , event_dim=self.event_dim )
@property
def _lowercase ( self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase ( self ):
'''simple docstring'''
return len(self.event_shape )
@property
def _lowercase ( self ):
'''simple docstring'''
return 0.0
def _lowercase ( self , _A ):
'''simple docstring'''
return ParameterProjection(
in_features=_A , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase ( self , *_A ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def _lowercase ( _A ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(_A ) + 4.0 )) / 2.0
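# squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps the reals onto (0, inf): it
# tends to x for large positive x, to 0 for large negative x, and equals 1 at
# x = 0, which is why the subclasses below route scale/df/total_count
# parameters through it to keep them strictly positive.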
class A_ (a_ ):
UpperCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
UpperCAmelCase__ = StudentT
@classmethod
def _lowercase ( cls , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = cls.squareplus(_A ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase = 2.0 + cls.squareplus(_A )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A_ (a_ ):
UpperCAmelCase__ = {"loc": 1, "scale": 1}
UpperCAmelCase__ = Normal
@classmethod
def _lowercase ( cls , _A , _A ):
'''simple docstring'''
UpperCAmelCase = cls.squareplus(_A ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A_ (a_ ):
UpperCAmelCase__ = {"total_count": 1, "logits": 1}
UpperCAmelCase__ = NegativeBinomial
@classmethod
def _lowercase ( cls , _A , _A ):
'''simple docstring'''
UpperCAmelCase = cls.squareplus(_A )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_A , logits=_A )
else:
return Independent(self.distribution_class(total_count=_A , logits=_A ) , 1 )
def _lowercase ( self , _A , _A = None , _A = None ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 273 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Dict = logging.get_logger(__name__)
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Tuple = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = SqueezeBertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**_A )
UpperCAmelCase = do_lower_case
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 273 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Tuple = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A : int = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = list(s_dict.keys() )
for key in keys:
UpperCAmelCase = R'''.*/layers_(\d+)'''
UpperCAmelCase = key
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ )
UpperCAmelCase = R'''(encoder|decoder)\/'''
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
if groups[0] == "encoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ )
elif groups[0] == "decoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , UpperCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase = s_dict[key].shape[0]
UpperCAmelCase = s_dict[key]
for idx in range(UpperCamelCase__ ):
UpperCAmelCase = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCamelCase__ )
return s_dict
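# Illustration of the renaming above on a hypothetical T5X key:
# "encoder/layers_0/attention/key/kernel"
# -> "encoder/block/0/layer/attention/key/kernel" (layer regex)
# -> "encoder/block/0/layer/0/SelfAttention/k/kernel" (classic mappings).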
__A : Optional[int] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ )
UpperCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ )
UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
UpperCAmelCase = str(activation[1] )
UpperCAmelCase = num_experts
UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ )
return config
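# Illustration of the parsing above (hypothetical gin lines): "NUM_HEADS = 12"
# matches r"(.*) = ([0-9.]*)" and, via GIN_TO_CONFIG_MAPPING, lands as
# num_heads=12 (an int, since the value has no "."), while a line such as
# "dense.MlpBlock.activations = ('gelu',)" is picked up by the activations
# regex and fills the feed-forward projection setting.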
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]:
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
if gin_file is not None:
UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
else:
UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
UpperCAmelCase = flax_params['''target''']
UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
UpperCAmelCase = rename_keys(UpperCamelCase__ )
UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__A : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A : List[str] = logging.getLogger(__name__)
class A_ (a_ ):
UpperCAmelCase__ = '''masked_bert'''
def __init__( self , _A=3_0_5_2_2 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1E-12 , _A=0 , _A="topK" , _A="constant" , _A=0.0 , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = pruning_method
UpperCAmelCase = mask_init
UpperCAmelCase = mask_scale
| 273 |
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
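# Note on the two tests above: both round-trip the pipeline through
# save_pretrained/from_pretrained and require the reloaded outputs to match to
# within 1e-4. AttnAddedKVProcessor is re-attached after loading because
# attention processors are plain code objects and are not serialized with the
# checkpoint.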
| 273 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 273 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Solve for the missing one of voltage, current, or power, given the other
    two (power = voltage * current). Exactly one argument must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
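# Worked example (uses electric_power defined above): with voltage = 230 V and
# power = 460 W, the missing current is P / V = 2 A.
assert electric_power(voltage=230, current=0, power=460).value == 2.0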
| 273 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation with ControlNet conditioning for Kandinsky 2.2.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
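# Note on the pattern above (hedged): with _LazyModule, the framework-specific
# submodules registered in _import_structure are only imported on first
# attribute access, so e.g.
#
#   from transformers.models.roformer import RoFormerModel
#
# triggers the torch-backed import lazily rather than at package import time.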
| 273 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
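# Small sanity check for handle_test_results above (assumes the usual pytest
# summary format, e.g. "== 2 failed, 10 passed in 30.5s ==").
assert handle_test_results("== 2 failed, 10 passed in 30.5s ==")[:2] == (2, 10)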
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # set by post()
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".txt")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__A : Any = get_job_links()
__A : Union[str, Any] = retrieve_available_artifacts()
__A : List[Any] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__A : str = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__A : Tuple = github_actions_job_links.get("run_doctests")
__A : List[Any] = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__A : Optional[int] = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__A , __A , __A : List[Any] = handle_test_results(artifact["stats"])
__A : List[Any] = failed
__A : Optional[int] = success
__A : int = time_spent[1:-1] + ", "
__A : List[str] = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__A : str = line.replace("FAILED ", "")
__A : List[Any] = line.split()[0].replace("\n", "")
if "::" in line:
__A , __A : List[str] = line.split("::")
else:
__A , __A : Union[str, Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__A : Optional[int] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__A : Union[str, Any] = all_failures[test] if test in all_failures else "N/A"
__A : Any = failure
break
__A : Optional[int] = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 273 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
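# Small sanity check for the renaming rules above (pure string logic, no
# weights needed): a query projection in the first encoder layer.
assert (
    rename_key("model.transformer_0.mha.W_q.weight")
    == "yoso.encoder.layer.0.attention.self.query.weight"
)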
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
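# Minimal usage sketch (hedged, commented out because this module is part of the
# library itself): with no backbone_config given, the branch above falls back to
# a ResNet backbone exposing four stages.
#
#   config = UperNetConfig()
#   config.backbone_config.model_type  # -> "resnet"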
| 273 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive fast modular exponentiation: base**exponent % modulo_value."""
    if exponent == 1:
        return base % modulo_value
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """
    Compute the last `digits` digits of the hyperexponentiation (tetration)
    base↑↑height, i.e. base**base**...**base with `height` copies of base.
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result


if __name__ == "__main__":
    print(f"{solution() = }")
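# Tiny hand-checkable case for the tetration logic above: 3↑↑2 = 3**3 = 27, so
# the last two digits are 27.
assert solution(base=3, height=2, digits=2) == 27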
| 273 | 1 |
def circle_sort(collection: list) -> list:
    """Sort a list in place using the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )

            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
__A : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
__A : Tuple = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
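# Quick self-check against Python's built-in sort; circle_sort is the function
# defined above, nothing else is assumed.
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([-1, 0, -2]) == sorted([-1, 0, -2])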
| 273 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
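# Hedged sketch of what generate_dummy_inputs produces: every second token is
# marked global, matching the [:, ::2] assignment above (commented out because
# this module is part of the library itself).
#
#   onnx_config = LongformerOnnxConfig(LongformerConfig())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   dummy["global_attention_mask"][0, :6]  # -> tensor([1, 0, 1, 0, 1, 0])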
| 273 | 1 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Solve for the missing one of resistance, reactance, or impedance of an AC
    circuit, given the other two: impedance**2 = resistance**2 + reactance**2.
    Exactly one argument must be 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
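# Worked example (uses electrical_impedance defined above): a 3-4-5 impedance
# triangle, with R = 3 and X = 4 the magnitude |Z| is 5.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}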
| 273 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation using the stochastic sampler of
    Karras et al.; equation references in the comments below follow that paper ([1]).
    """

    # add type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
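# Hedged usage sketch (requires a trained VE checkpoint, e.g. the
# "google/ncsnpp-celebahq-256" weights shown in the diffusers docs; commented
# out because it is not runnable without downloading one):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]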
| 273 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
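# Hedged aside: this module stores keys in a (key_size, e_1, e_2, p) /
# (key_size, d) layout of its own. Textbook ElGamal over the same math looks
# like the toy demo below (parameters far too small for real use).
p, g, d = 467, 2, 127                          # toy prime, generator, private key
y = pow(g, d, p)                               # public: (p, g, y), private: d
k, m = 213, 123                                # ephemeral key and message, m < p
c1, c2 = pow(g, k, p), (m * pow(y, k, p)) % p  # encryption
assert (c2 * pow(c1, p - 1 - d, p)) % p == m   # decryption recovers m (Fermat)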
| 273 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
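# Shape sanity check for the helper above: floats_list((2, 3)) yields a 2x3
# nested list of floats drawn from [0, scale).
assert len(floats_list((2, 3))) == 2 and len(floats_list((2, 3))[0]) == 3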
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**_A )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(_A ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(_A )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _A )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowercase ( self , _A ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort('''id''' ).select(range(_A ) )[:_A]['''audio''']
return [x["array"] for x in speech_samples]
def _lowercase ( self ):
'''simple docstring'''
# fmt: off
UpperCAmelCase = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(_A , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _A , atol=1E-6 ) )
def _lowercase ( self ):
'''simple docstring'''
# fmt: off
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=_A , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _A , atol=1E-4 ) )
| 273 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A_ (unittest.TestCase ):
UpperCAmelCase__ = JukeboxTokenizer
UpperCAmelCase__ = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowercase ( self ):
'''simple docstring'''
import torch
UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
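# Jukebox conditions three prior levels, so the tokenizer returns one tensor of
# conditioning tokens per level; all three are checked against the fixtures below.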
# fmt: off
UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowercase ( self ):
'''simple docstring'''
import torch
UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
# fmt: off
UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__A : Union[str, Any] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
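# Each optional backend below (sentencepiece, speech, TensorFlow, PyTorch) guards its
# own symbols: when the dependency is missing, those names are simply omitted from
# the lazy module instead of breaking the top-level import.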
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) )
else:
return a * actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> float:
'''simple docstring'''
if b < 0:
return 1 / actual_power(UpperCamelCase__ , UpperCamelCase__ )
return actual_power(UpperCamelCase__ , UpperCamelCase__ )
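# Reference behaviour (not part of the original script): power(2, 10) == 1024 and
# power(-2, -3) == 1 / (-2) ** 3 == -0.125. Note the recursion halves the exponent
# but recomputes each half twice, so it does not reach the O(log b) cost of true
# binary exponentiation.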
if __name__ == "__main__":
print(power(-2, -3))
| 273 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(UpperCamelCase__ , int ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(UpperCamelCase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
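# For reference, hexagonal_numbers(length=5) evaluates to [0, 1, 6, 15, 28]:
# the n-th hexagonal number is n * (2n - 1) for n = 0 .. length - 1.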
| 273 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
UpperCAmelCase = TaConfig.from_json_file(UpperCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase = TaForConditionalGeneration(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 273 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def _lowercase ( self , _A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _lowercase ( self , _A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A_ :
@staticmethod
def _lowercase ( *_A , **_A ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class A_ (unittest.TestCase ):
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_classifier(_A , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_A ) , [
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
] , )
UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
] , )
@require_tf
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_classifier(_A , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_A ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
{'''score''': 0.3_33, '''label''': ANY(_A )},
],
] , )
@slow
@require_torch
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_classifier(_A , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_A ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_classifier(_A , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_A ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
| 273 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Tuple = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ (a_ ):
UpperCAmelCase__ = '''big_bird'''
def __init__( self , _A=5_0_3_5_8 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=4_0_9_6 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=0 , _A=1 , _A=2 , _A=6_6 , _A="block_sparse" , _A=True , _A=False , _A=6_4 , _A=3 , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = use_cache
UpperCAmelCase = rescale_embeddings
UpperCAmelCase = attention_type
UpperCAmelCase = use_bias
UpperCAmelCase = block_size
UpperCAmelCase = num_random_blocks
UpperCAmelCase = classifier_dropout
class A_ (a_ ):
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
UpperCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
UpperCAmelCase = min(i , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
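# For reference, the call above prints 252 (C(10, 5)). Sweeping j from high to low
# is what makes the in-place row update valid: c[j - 1] still holds the previous
# row's value when it is added into c[j].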
| 273 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFViTModel(config=_A )
UpperCAmelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase = model(**_A )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 273 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : Union[str, Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class A_ (a_ ):
UpperCAmelCase__ = '''megatron-bert'''
def __init__( self , _A=2_9_0_5_6 , _A=1_0_2_4 , _A=2_4 , _A=1_6 , _A=4_0_9_6 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1E-12 , _A=0 , _A="absolute" , _A=True , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
| 273 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
| 273 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A_ :
# setable values
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None # sigma(t_i)
@classmethod
def _lowercase ( cls ):
'''simple docstring'''
return cls()
@dataclass
class A_ (a_ ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class A_ (a_ , a_ ):
@property
def _lowercase ( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , _A = 0.02 , _A = 1_0_0 , _A = 1.0_07 , _A = 8_0 , _A = 0.05 , _A = 5_0 , ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
return KarrasVeSchedulerState.create()
def _lowercase ( self , _A , _A , _A = () ):
'''simple docstring'''
UpperCAmelCase = jnp.arange(0 , _A )[::-1].copy()
UpperCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=_A , schedule=jnp.array(_A , dtype=jnp.floataa ) , timesteps=_A , )
def _lowercase ( self , _A , _A , _A , _A , ):
'''simple docstring'''
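# Stochastic "churn" from the Karras et al. (2022) sampler: while sigma lies in the
# configured band, raise the noise level to sigma_hat = sigma + gamma * sigma and
# add fresh Gaussian noise scaled to bridge sigma -> sigma_hat.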
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase = random.split(_A , num=1 )
UpperCAmelCase = self.config.s_noise * random.normal(key=_A , shape=sample.shape )
UpperCAmelCase = sigma + gamma * sigma
UpperCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _lowercase ( self , _A , _A , _A , _A , _A , _A = True , ):
'''simple docstring'''
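# First-order (Euler) step: reconstruct the denoised sample at sigma_hat, form the
# derivative d = (sample - denoised) / sigma_hat, then move from sigma_hat to
# sigma_prev along d.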
UpperCAmelCase = sample_hat + sigma_hat * model_output
UpperCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A )
def _lowercase ( self , _A , _A , _A , _A , _A , _A , _A , _A = True , ):
'''simple docstring'''
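# Second-order (Heun) correction: re-evaluate the derivative at the Euler
# prediction and average it with the first derivative before re-stepping.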
UpperCAmelCase = sample_prev + sigma_prev * model_output
UpperCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_A , derivative=_A , state=_A )
def _lowercase ( self , _A , _A , _A , _A ):
'''simple docstring'''
raise NotImplementedError()
| 273 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_ :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A_ :
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _lowercase ( self ):
'''simple docstring'''
if self.train_file is not None:
UpperCAmelCase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
UpperCAmelCase__ = 42
UpperCAmelCase__ = True
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def __call__( self , _A ):
'''simple docstring'''
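# Each feature carries `num_choices` candidate encodings. Flatten them so the
# tokenizer can pad every candidate to one length, then reshape back to
# (batch_size, num_choices, seq_len) and re-attach the labels.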
UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase = [feature.pop(_A ) for feature in features]
UpperCAmelCase = len(_A )
UpperCAmelCase = len(features[0]['''input_ids'''] )
UpperCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
]
UpperCAmelCase = list(chain(*_A ) )
UpperCAmelCase = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
# Add back labels
UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa )
return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
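# Each SWAG example pairs one context sentence with four candidate endings; the
# function below builds the four (context, "question + ending") pairs per example,
# tokenizes them flat, then regroups the encodings four at a time.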
def preprocess_function(UpperCamelCase__ ):
UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase = examples[question_header_name]
UpperCAmelCase = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
# Tokenize
UpperCAmelCase = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
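# Accuracy over the argmax of the per-example choice logits.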
def compute_metrics(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase = eval_predictions
UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''train''' , UpperCamelCase__ )
trainer.save_metrics('''train''' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''eval''' , UpperCamelCase__ )
trainer.save_metrics('''eval''' , UpperCamelCase__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
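# Entry point for spawned workers (e.g. TPU processes via xla_spawn): each worker
# simply runs main().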
main()
if __name__ == "__main__":
main()
| 273 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig ):
    model_type = '''trajectory_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=1_0_0 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=2_4_9 , action_dim=6 , observation_dim=1_7 , transition_dim=2_5 , n_layer=4 , n_head=4 , n_embd=1_2_8 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.00_06 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
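
# Usage sketch (illustrative, guarded so imports stay side-effect free): with the
# attribute map above, the generic config names resolve to the GPT-style ones.
if __name__ == "__main__":
    cfg = TrajectoryTransformerConfig()
    assert cfg.hidden_size == cfg.n_embd == 1_2_8
    assert cfg.num_hidden_layers == cfg.n_layer == 4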
| 273 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        '''simple docstring'''
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
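
        # Sketch of the usual continuation of this check (assumed; names are
        # illustrative): feed a few fresh tokens once with and once without
        # `past_key_values`, then assert the overlapping logits agree.
        #   next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        #   next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        #   output_from_no_past = model(next_input_ids)[0]
        #   output_from_past = model(next_tokens, past_key_values=past_key_values)[0]
        #   tf.debugging.assert_near(output_from_past, output_from_no_past[:, -3:], rtol=1e-3)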
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase ):
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''
@cached_property
    def tokenizer( self ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        '''simple docstring'''
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )

    def translate_src_text( self , **tokenizer_kwargs ):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation_en_ro( self ):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
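
# Usage sketch outside the test harness (slow; downloads the full en->ro checkpoint,
# mirroring `translate_src_text` above):
#   tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(ids, skip_special_tokens=True))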
| 273 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
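
# Example invocation (paths are placeholders; the script filename is assumed):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin \
#       --base_model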
| 273 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__( self , parent , batch_size=1_4 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
        '''simple docstring'''
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': CTRLModel,
            '''text-classification''': CTRLForSequenceClassification,
            '''text-generation''': CTRLLMHeadModel,
            '''zero-shot''': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=3_7 )
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_ctrl_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )

    def test_ctrl_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl( self ):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=torch_device ) # Legal the president is
        expected_output_ids = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 273 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path('''tests/test_configs''' )
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    def test_no_config( self ):
        '''simple docstring'''
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def test_config_compatibility( self ):
        '''simple docstring'''
        for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(config ), self.test_file_path] , env=os.environ.copy() )

    def test_accelerate_test( self ):
        '''simple docstring'''
        execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase ):
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
    def test_base( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )

    def test_base_backward_compatibility( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command''',
                self.command,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )

    def test_with_config_file( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=True )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )

    def test_with_config_file_and_command( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )

    def test_with_config_file_and_multiple_command( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--command''',
                self.command,
                '''--command''',
                '''echo "Hello World"''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , output , )

    def test_with_config_file_and_command_file( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )

    def test_with_config_file_and_command_file_backward_compatibility( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command_file''',
                self.command_file,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )

    def test_accelerate_install( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )

    def test_accelerate_install_version( self ):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--install_accelerate''',
                '''--accelerate_version''',
                '''12.0.0''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
| 273 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__( self , k: float , window_size: int ):
        '''k is the Harris free parameter (empirically 0.04-0.06); window_size is the side of the summation window.'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__( self ):
        '''simple docstring'''
        return str(self.k )

    def detect( self , img_path: str ):
        '''simple docstring'''
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the validated constructor value rather than a hardcoded constant
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the Harris response; can be tuned per image
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_5_5 )
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
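
# For comparison: OpenCV ships a vectorized Harris detector; a rough equivalent of the
# loop above (illustrative parameters, not used by this script) would be:
#   response = cv2.cornerHarris(np.float32(img), 3, 3, 0.04)
#   corners = np.argwhere(response > 0.01 * response.max())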
| 273 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
a_ , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class A_ (a_ ):
def _lowercase ( self , _A ):
'''simple docstring'''
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.get_masked_index(_A )
UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _lowercase ( self , _A ):
'''simple docstring'''
if isinstance(_A , _A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_A )
def _lowercase ( self , _A , _A=None , **_A ):
'''simple docstring'''
if return_tensors is None:
UpperCAmelCase = self.framework
UpperCAmelCase = self.tokenizer(_A , return_tensors=_A )
self.ensure_exactly_one_mask_token(_A )
return model_inputs
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.model(**_A )
UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def _lowercase ( self , _A , _A=5 , _A=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase = target_ids.shape[0]
UpperCAmelCase = model_outputs['''input_ids'''][0]
UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase = outputs.numpy()
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = stable_softmax(_A , axis=-1 )
if target_ids is not None:
UpperCAmelCase = tf.gather_nd(tf.squeeze(_A , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase = tf.expand_dims(_A , 0 )
UpperCAmelCase = tf.math.top_k(_A , k=_A )
UpperCAmelCase , UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase = probs[..., target_ids]
UpperCAmelCase , UpperCAmelCase = probs.topk(_A )
UpperCAmelCase = []
UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase = target_ids[p].tolist()
UpperCAmelCase = p
# Filter padding out:
UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase = self.tokenizer.decode(_A , skip_special_tokens=_A )
UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_A )
result.append(_A )
if single_mask:
return result[0]
return result
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
if isinstance(_A , _A ):
UpperCAmelCase = [targets]
try:
UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase = {}
UpperCAmelCase = []
for target in targets:
UpperCAmelCase = vocab.get(_A , _A )
if id_ is None:
UpperCAmelCase = self.tokenizer(
_A , add_special_tokens=_A , return_attention_mask=_A , return_token_type_ids=_A , max_length=1 , truncation=_A , )['''input_ids''']
if len(_A ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase = list(set(_A ) )
if len(_A ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
UpperCAmelCase = np.array(_A )
return target_ids
def _lowercase ( self , _A=None , _A=None ):
'''simple docstring'''
UpperCAmelCase = {}
if targets is not None:
UpperCAmelCase = self.get_target_ids(_A , _A )
UpperCAmelCase = target_ids
if top_k is not None:
UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _A , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = super().__call__(_A , **_A )
if isinstance(_A , _A ) and len(_A ) == 1:
return outputs[0]
return outputs
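
# Usage sketch (checkpoint name illustrative): this pipeline is normally built via
# `transformers.pipeline` rather than instantiated directly:
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=3)  # [{score, token, token_str, sequence}, ...]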
| 273 |
from datetime import datetime
import requests
def download_video(url: str ) -> bytes:
    '''simple docstring'''
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F'Done. Video saved to disk as {file_name}.')
| 273 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
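
# `list_field` gives dataclasses a safe mutable default; `PlotArguments` below uses it as:
#   short_model_names: Optional[List[str]] = list_field(default=None, metadata={"help": "..."})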
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'''help''': '''The csv file to plot.'''} , )
    plot_along_batch: bool = field(
        default=False , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
    is_time: bool = field(
        default=False , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
    no_log_scale: bool = field(
        default=False , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
    is_train: bool = field(
        default=False , metadata={
            '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def can_convert_to_int(value ) -> bool:
    '''simple docstring'''
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float(value ) -> bool:
    '''simple docstring'''
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
    def __init__( self , args ):
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='''''' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
                if can_convert_to_int(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size'''] ), int(row['''sequence_length'''] ))
                    ] = int(row['''result'''] )
                elif can_convert_to_float(row['''result'''] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size'''] ), int(row['''sequence_length'''] ))
                    ] = float(row['''result'''] )
    def plot( self ):
        '''simple docstring'''
        fig, ax = plt.subplots()
        title_str = '''Time usage''' if self.args.is_time else '''Memory usage'''
        title_str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''' )
            ax.set_yscale('''log''' )
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
            results = self.result_dict[model_name]['''result''']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
                plt.plot(x_axis_array , y_axis_array , '''--''' )
                title_str += F""" {label_model_name} vs."""
        title_str = title_str[:-4]
        y_axis_label = '''Time in s''' if self.args.is_time else '''Memory in MB'''
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 273 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float ) -> float:
        '''simple docstring'''
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
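
    # Rough analytic check, worked by hand: f(x) = x^3 + x^2 changes sign at x = -1,
    # so the unsigned area on [-5, 5] is F(-5) + F(5) - 2*F(-1) with F(x) = x^4/4 + x^3/3,
    # i.e. 3752/12 ≈ 312.67 -- the printed values should approach this as steps grow.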
| 273 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class A_ (a_ ):
UpperCAmelCase__ = ['''melgan''']
def __init__( self , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
# From MELGAN
UpperCAmelCase = math.log(1E-5 ) # Matches MelGAN training.
UpperCAmelCase = 4.0 # Largest value for most examples
UpperCAmelCase = 1_2_8
self.register_modules(
notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
def _lowercase ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = output_range
if clip:
UpperCAmelCase = torch.clip(_A , self.min_value , self.max_value )
# Scale to [0, 1].
UpperCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _lowercase ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = input_range
UpperCAmelCase = torch.clip(_A , _A , _A ) if clip else outputs
# Scale to [0, 1].
UpperCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = input_tokens > 0
UpperCAmelCase , UpperCAmelCase = self.notes_encoder(
encoder_input_tokens=_A , encoder_inputs_mask=_A )
UpperCAmelCase , UpperCAmelCase = self.continuous_encoder(
encoder_inputs=_A , encoder_inputs_mask=_A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = noise_time
if not torch.is_tensor(_A ):
UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase = self.decoder(
encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A )
return logits
@torch.no_grad()
def __call__( self , _A , _A = None , _A = 1_0_0 , _A = True , _A = "numpy" , _A = None , _A = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_A )}.""" )
UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
UpperCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
for i, encoder_input_tokens in enumerate(_A ):
if i == 0:
UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCAmelCase = ones
UpperCAmelCase = self.scale_features(
_A , output_range=[-1.0, 1.0] , clip=_A )
UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase = self.decode(
encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample
UpperCAmelCase = self.scale_to_features(_A , input_range=[-1.0, 1.0] )
UpperCAmelCase = mel[:1]
UpperCAmelCase = mel.cpu().float().numpy()
UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A )
logger.info('''Generated segment''' , _A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_A )
| 273 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
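
# Usage sketch (network access assumed; any checkpoint mapped above works):
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tokenizer("hello world")["input_ids"]  # [CLS] ... [SEP] ids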
| 273 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''Finds a root of `function` via the secant method.'''
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
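    # f(x) = x^3 - 2x - 5 is the classic secant/Newton test cubic; its real root is
    # approximately 2.0945514815, so the printed value should agree to ~5 decimals.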
| 273 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , new_key )
        layer_to_block_of_layer = R'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                new_key = key.replace('''expert/''' , F"""experts/expert_{idx}/""" )
                s_dict[new_key] = expert_weights[idx]
                print(F"""{key} -> {new_key}""" )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file , num_experts ):
    '''simple docstring'''
    import regex as re

    with open(gin_file , '''r''' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'''(.*) = ([0-9.]*)''' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
    activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    '''simple docstring'''
    print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
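
# Example invocation (paths are placeholders; the script filename is assumed):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8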
| 273 | 1 |
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic( self ):
        '''The heuristic here is the Manhattan Distance from current to goal.'''
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , other ):
        '''simple docstring'''
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search( self ):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
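
    # Note: greedy best-first orders the frontier purely by the Manhattan heuristic
    # (f_cost ignores g_cost), so the returned path is not guaranteed shortest;
    # sorting on g_cost + heuristic instead would turn this into A*.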
| 273 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A : List[Any] = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
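# Apply P = V * I: exactly one of voltage, current and power is passed as 0 and is solved for from the other two.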
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
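# Position ids start at 2; the first two indices appear reserved for padding (RoBERTa-style convention, inferred from the +2 offset below).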
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A : Tuple = logging.get_logger(__name__)
@dataclass
class A_ (a_ ):
UpperCAmelCase__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_A ):
'''simple docstring'''
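# Translate the legacy "no_*" flags (e.g. no_cuda) into their positive counterparts before normal init.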
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase = deprecated_arg[3:]
setattr(self , _A , not kwargs.pop(_A ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
UpperCAmelCase = kwargs.pop('''torchscript''' , self.torchscript )
UpperCAmelCase = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
UpperCAmelCase = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**_A )
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''Trace the models using torchscript'''} )
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
UpperCAmelCase__ = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def _lowercase ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
UpperCAmelCase = torch.device('''cpu''' )
UpperCAmelCase = 0
elif is_torch_tpu_available():
UpperCAmelCase = xm.xla_device()
UpperCAmelCase = 0
else:
UpperCAmelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
UpperCAmelCase = torch.cuda.device_count()
return device, n_gpu
@property
def _lowercase ( self ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def _lowercase ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowercase ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _lowercase ( self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _lowercase ( self ):
'''simple docstring'''
return self.n_gpu > 0
| 273 |
| 273 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
UpperCAmelCase = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
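# The fused qkv projection is split into equal query / key / value slices along dim 0 below.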
UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = dct.pop(UpperCamelCase__ )
UpperCAmelCase = val
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase__ )
UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
UpperCAmelCase = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
UpperCAmelCase = 1024
UpperCAmelCase = 4096
UpperCAmelCase = 24
UpperCAmelCase = 16
UpperCAmelCase = 1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase = False
UpperCAmelCase = '''relu'''
UpperCAmelCase = 1024
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
# load HuggingFace model
UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
UpperCAmelCase = val
else:
UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
UpperCAmelCase = outputs.logits
UpperCAmelCase = torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 273 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
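# Square-and-multiply: computes base**exponent % modulo_value in O(log exponent)
# multiplications, e.g. _modexpt(3, 5, 7) == 3**5 % 7 == 5.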
if exponent == 1:
return base % modulo_value  # reduce, so the base case is also correct when base >= modulo_value
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
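# Repeatedly raises base to the previous result mod 10**digits, i.e. the last "digits" digits of the power tower 1777^^1855 (Project Euler 188).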
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 1 |
from math import factorial
__A : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
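# Sum of the factorials of the digits, e.g. 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145.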
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(UpperCamelCase__ ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 60 , UpperCamelCase__ = 100_0000 ) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
UpperCAmelCase = 0
# the cached sizes of the previous chains
UpperCAmelCase = {}
for start_chain_element in range(1 , UpperCamelCase__ ):
# The temporary set will contain the elements of the chain
UpperCAmelCase = set()
UpperCAmelCase = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater than the desired one.
UpperCAmelCase = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(UpperCamelCase__ )
chain_set_length += 1
UpperCAmelCase = digit_factorial_sum(UpperCamelCase__ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
UpperCAmelCase = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 273 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
__A : str = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class A_ (a_ ):
UpperCAmelCase__ = '''longformer'''
def __init__( self , _A = 5_1_2 , _A = 2 , _A = 1 , _A = 0 , _A = 2 , _A = 3_0_5_2_2 , _A = 7_6_8 , _A = 1_2 , _A = 1_2 , _A = 3_0_7_2 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 5_1_2 , _A = 2 , _A = 0.02 , _A = 1E-12 , _A = False , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase = attention_window
UpperCAmelCase = sep_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = onnx_export
class A_ (a_ ):
def __init__( self , _A , _A = "default" , _A = None ):
'''simple docstring'''
super().__init__(_A , _A , _A )
UpperCAmelCase = True
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = super().outputs
if self.task == "default":
UpperCAmelCase = {0: '''batch'''}
return outputs
@property
def _lowercase ( self ):
'''simple docstring'''
return 1E-4
@property
def _lowercase ( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
def _lowercase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
'''simple docstring'''
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
UpperCAmelCase = 1
return inputs
| 273 | 1 |
import operator as op
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
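# Evaluate a space-separated postfix (RPN) expression with an explicit stack, printing every push/pop as a table.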
UpperCAmelCase = []
UpperCAmelCase = lambda UpperCamelCase__ , UpperCamelCase__ : int(x / y ) # noqa: E731 integer division operation
UpperCAmelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(UpperCamelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(UpperCamelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(UpperCamelCase__ ) , sep=''' | ''' )
else:
UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(UpperCamelCase__ ) , sep=''' | ''' )
UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(UpperCamelCase__ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(UpperCamelCase__ ) , int(UpperCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(UpperCamelCase__ ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
__A : int = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 273 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ (a_ ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self , _A = 1 , _A = 5_0 , _A = None , _A = "pil" , _A = True , **_A , ):
'''simple docstring'''
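# Stochastic sampler from Karras et al. (2022), "Elucidating the Design Space of Diffusion-Based Generative Models": add noise, take an Euler step, then apply an optional second-order correction.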
UpperCAmelCase = self.unet.config.sample_size
UpperCAmelCase = (batch_size, 3, img_size, img_size)
UpperCAmelCase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase = randn_tensor(_A , generator=_A , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase = self.scheduler.schedule[t]
UpperCAmelCase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase , UpperCAmelCase = self.scheduler.add_noise_to_input(_A , _A , generator=_A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase = self.scheduler.step(_A , _A , _A , _A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase = self.scheduler.step_correct(
_A , _A , _A , _A , step_output.prev_sample , step_output['''derivative'''] , )
UpperCAmelCase = step_output.prev_sample
UpperCAmelCase = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 273 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A : int = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = list(s_dict.keys() )
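# 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original T5X checkpoint.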
for key in keys:
UpperCAmelCase = R'''.*/layers_(\d+)'''
UpperCAmelCase = key
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ )
UpperCAmelCase = R'''(encoder|decoder)\/'''
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
if groups[0] == "encoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ )
elif groups[0] == "decoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , UpperCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase = s_dict[key].shape[0]
UpperCAmelCase = s_dict[key]
for idx in range(UpperCamelCase__ ):
UpperCAmelCase = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCamelCase__ )
return s_dict
__A : Optional[int] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ )
UpperCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ )
UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
UpperCAmelCase = str(activation[1] )
UpperCAmelCase = num_experts
UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ )
return config
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]:
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
if gin_file is not None:
UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
else:
UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
UpperCAmelCase = flax_params['''target''']
UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
UpperCAmelCase = rename_keys(UpperCamelCase__ )
UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__A : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,  # must match the "--switch_t5x_checkpoint_path" flag defined above
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__A : str = random.Random()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=1.0 , UpperCamelCase__=None , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
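# Builds a nested list of random floats with the given (batch, length) shape, used as fake audio input in the tests.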
if rng is None:
UpperCAmelCase = global_rng
UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A_ (unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=8_0 , _A=1_6 , _A=6_4 , _A="hann_window" , _A=8_0 , _A=7_6_0_0 , _A=1E-10 , _A=True , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = min_seq_length
UpperCAmelCase = max_seq_length
UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase = feature_size
UpperCAmelCase = padding_value
UpperCAmelCase = sampling_rate
UpperCAmelCase = do_normalize
UpperCAmelCase = num_mel_bins
UpperCAmelCase = hop_length
UpperCAmelCase = win_length
UpperCAmelCase = win_function
UpperCAmelCase = fmin
UpperCAmelCase = fmax
UpperCAmelCase = mel_floor
UpperCAmelCase = return_attention_mask
def _lowercase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
def _lowercase ( self , _A=False , _A=False ):
'''simple docstring'''
if equal_length:
UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = SpeechTaFeatureExtractor
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def _lowercase ( self , _A ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , padding=_A , max_length=_A , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[1][1_0_0_0:].sum() < 1E-6 )  # padding of the second (length-1000) input
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
UpperCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(_A , _A ):
UpperCAmelCase = feat_extract(_A , max_length=_A , padding=_A )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = feat_extract(
_A , truncation=_A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCAmelCase = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=_A , padding=_A , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
UpperCAmelCase = np.asarray(_A )
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
UpperCAmelCase = feature_extractor(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_A )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_batch_feature_target_pt( self ):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_padding_accepts_tensors_target_pt( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''np''' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
    def test_attention_mask_target( self ):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
    def test_attention_mask_with_truncation_target( self ):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
    def test_integration_target( self ):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 273 | 1 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A_ (a_ ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write("".join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_dpr_tokenizer( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        '''simple docstring'''
        save_dir = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        '''simple docstring'''
        tokenizer = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        '''simple docstring'''
        tokenizer = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
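# Usage sketch (illustrative, not part of the module above): once the lazy
# module is registered, attribute access resolves submodules on first use, so
# heavy optional backends such as torch or TF are only imported when needed.
#
#   from transformers.models.speech_to_text import Speech2TextConfig
#   config = Speech2TextConfig()   # first attribute access triggers the real import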
| 273 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()
    logger.info(F'Loading data from {args.data_file}')
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
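# Follow-up sketch (illustrative; the 0.7 exponent is an assumed XLM-style
# smoothing value, not something this script fixes): the dumped counts are
# typically turned into masking probabilities that up-weight rare tokens
# relative to a uniform masking scheme.
#
#   import numpy as np
#   smoothed = np.maximum(np.array(counts), 1) ** -0.7
#   token_probs = smoothed / smoothed.sum()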
| 273 |
def hexagonal_numbers(length: int) -> list[int]:
    '''Return the first `length` hexagonal numbers, h_n = n * (2 * n - 1).'''
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
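# Sanity check of the closed form h_n = n * (2 * n - 1): indexing from n = 0
# as above, the first five hexagonal numbers are 0, 1, 6, 15, 28.
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]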
| 273 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    '''Two-color the graph with DFS; it is bipartite iff no edge joins two
    vertices of the same color.'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )

    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
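# A triangle (odd cycle) is the canonical non-bipartite graph, so the checker
# rejects it, while the 4-cycle plus isolated vertex above is accepted.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False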
| 273 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def _lowercase ( self , _A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _lowercase ( self , _A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 1 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback ):
    def __init__( self ):
        '''simple docstring'''
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_init_end''' )
    def on_train_begin( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_train_begin''' )
    def on_train_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_train_end''' )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_epoch_begin''' )
    def on_epoch_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_epoch_end''' )
    def on_step_begin( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_step_begin''' )
    def on_step_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_step_end''' )
    def on_evaluate( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_evaluate''' )
    def on_predict( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_predict''' )
    def on_save( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_save''' )
    def on_log( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_log''' )
    def on_prediction_step( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.events.append('''on_prediction_step''' )
@require_torch
class TrainerCallbackTest(unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=6_4 , eval_len=6_4 , callbacks=None , disable_tqdm=False , **kwargs ):
        '''simple docstring'''
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        '''simple docstring'''
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        '''simple docstring'''
        expected_events = ['''on_init_end''', '''on_train_begin''']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('''on_epoch_begin''' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_trainer()
UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# Callbacks passed at init are added to the default callbacks
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
UpperCAmelCase = self.get_trainer(disable_tqdm=_A )
UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCAmelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_A )
expected_callbacks.remove(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
UpperCAmelCase = self.get_trainer()
UpperCAmelCase = trainer.pop_callback(_A )
self.assertEqual(cb.__class__ , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
trainer.add_callback(_A )
expected_callbacks.insert(0 , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# We can also add, pop, or remove by instance
UpperCAmelCase = self.get_trainer()
UpperCAmelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_A )
expected_callbacks.remove(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
UpperCAmelCase = self.get_trainer()
UpperCAmelCase = trainer.callback_handler.callbacks[0]
UpperCAmelCase = trainer.pop_callback(_A )
self.assertEqual(_A , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
trainer.add_callback(_A )
expected_callbacks.insert(0 , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
def _lowercase ( self ):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=_A )
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# Independent log/save/eval
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# A bit of everything
UpperCAmelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
UpperCAmelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_A ) in warn_mock.call_args[0][0]
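# End-user sketch of the same callback-management API exercised above
# (illustrative; assumes a standard transformers Trainer instance):
#
#   trainer = Trainer(model, args, callbacks=[MyTestTrainerCallback()])
#   trainer.remove_callback(PrinterCallback)      # remove by class
#   cb = trainer.pop_callback(ProgressCallback)   # pop by class, get the instance
#   trainer.add_callback(cb)                      # re-add by instance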
| 273 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : Tuple = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig ):
    model_type = '''big_bird'''
    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4_0_9_6 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=6_6 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=6_4 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig ):
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
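# Usage sketch for the two classes above (illustrative): the ONNX config tells
# the exporter which input axes must stay symbolic at export time.
#
#   onnx_config = BigBirdOnnxConfig(BigBirdConfig())
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])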
| 273 | 1 |
from math import pow
def backtrack(needed_sum: int, power: int, current_number: int, current_sum: int, solutions_count: int) -> tuple[int, int]:
    '''Count representations of `needed_sum` as a sum of distinct natural
    numbers raised to `power`, considering candidates >= `current_number`.'''
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of current_number is less than needed_sum, then try the next number.
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    '''Return the number of ways `needed_sum` can be written as a sum of
    distinct natural numbers each raised to `power`.'''
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
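    # Hand-checked examples: 13 = 2**2 + 3**2 is the only way to write 13 as a
    # sum of distinct squares, while 100 has three such representations
    # (10**2, 6**2 + 8**2, and 1**2 + 3**2 + 4**2 + 5**2 + 7**2).
    print(solve(13, 2))   # 1
    print(solve(100, 2))  # 3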
| 273 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFViTModel(config=_A )
UpperCAmelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _lowercase ( self , _A , _A , _A ):
'''simple docstring'''
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase = self.image_size // 2
UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFViTForImageClassification(_A )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _lowercase ( self ):
'''simple docstring'''
pass
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class A_ (unittest.TestCase ):
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase = model(**_A )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
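# Inference sketch at the checkpoint's native resolution (illustrative; `model`
# and `image_processor` as constructed in the integration test above):
#
#   inputs = image_processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits               # shape (1, 1000)
#   predicted_class = int(tf.math.argmax(logits, axis=-1))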
| 273 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''' )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        '''simple docstring'''
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , '''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='''qformer_tokenizer''' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
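# Round-trip sketch (illustrative): save_pretrained() writes the QFormer
# tokenizer into a "qformer_tokenizer" subfolder next to the regular tokenizer
# and image processor, and from_pretrained() reattaches it on load.
#
#   processor.save_pretrained("checkpoint/")
#   reloaded = InstructBlipProcessor.from_pretrained("checkpoint/")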
| 273 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
| 273 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class DataTrainingArguments:
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
    def __post_init__( self ):
'''simple docstring'''
if self.train_file is not None:
UpperCAmelCase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
def __call__( self , _A ):
'''simple docstring'''
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
return batch
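# Shape sketch for the collator above (comments only, illustrative): each
# example carries `num_choices` tokenized candidate sequences, which are
# flattened to (batch_size * num_choices, seq_len) so that tokenizer.pad()
# sees one flat batch, then reshaped back for the multiple-choice model:
#
#   2 examples x 4 choices x 7 tokens -> tokenizer.pad() over 8 flat sequences
#   batch["input_ids"].shape == (2, 4, 7) after .view(batch_size, num_choices, -1)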
def main():
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ ):
UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase = examples[question_header_name]
UpperCAmelCase = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
# Tokenize
UpperCAmelCase = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase = eval_predictions
UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''train''' , UpperCamelCase__ )
trainer.save_metrics('''train''' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''eval''' , UpperCamelCase__ )
trainer.save_metrics('''eval''' , UpperCamelCase__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 273 | 1 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase__ ) )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> float:
'''simple docstring'''
UpperCAmelCase = 0.0
for coeff in reversed(UpperCamelCase__ ):
UpperCAmelCase = result * x + coeff
return result
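# Horner's rule evaluates c0 + c1*x + ... + cn*x**n as
# ((cn*x + c(n-1))*x + ...)*x + c0, one multiply and one add per coefficient,
# instead of recomputing every power of x. Sanity check for the demo below:
# evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) == horner(...) == 79800.0.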
if __name__ == "__main__":
__A : int = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : Optional[int] = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 273 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
UpperCAmelCase__ = MBartConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = TFMBartModel(config=_A ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = inputs_dict['''head_mask''']
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
UpperCAmelCase = past_key_values[1]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[str]:
'''simple docstring'''
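    # Build default masks when the caller does not supply them: attention
    # masks zero out pad positions, the decoder's first (start) token is
    # always attended to, and head masks default to all-ones (keep every head).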
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFMBartModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ (unittest.TestCase ):
UpperCAmelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCAmelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCAmelCase__ = '''facebook/mbart-large-en-ro'''
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def _lowercase ( self , **_A ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(self.src_text , **_A , return_tensors='''tf''' )
UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCAmelCase = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def _lowercase ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 273 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class A_ (unittest.TestCase ):
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = FlaxViTModel(config=_A )
UpperCAmelCase = model(_A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (self.image_size, self.image_size)
UpperCAmelCase = (self.patch_size, self.patch_size)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = FlaxViTForImageClassification(config=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = FlaxViTForImageClassification(_A )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = FlaxViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(_A , _A )
UpperCAmelCase = model_class(_A )
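                # Run the model twice, once under jax.jit (XLA-compiled) and
                # once with jit disabled (eager), and check that the two
                # executions return tuples of matching length and shapes.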
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
UpperCAmelCase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(_A )
| 273 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
if self.use_mc_token_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
        (
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
        ) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 100 ) -> int:
'''simple docstring'''
UpperCAmelCase = set()
UpperCAmelCase = 0
UpperCAmelCase = n + 1 # maximum limit
for a in range(2 , UpperCamelCase__ ):
for b in range(2 , UpperCamelCase__ ):
UpperCAmelCase = a**b # calculates the current power
collect_powers.add(UpperCamelCase__ ) # adds the result to the set
return len(UpperCamelCase__ )
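# For the default n = 100 this counts the distinct values of a**b with
# 2 <= a, b <= 100; the expected result is 9183 (Project Euler problem 29).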
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 273 |
import cva
import numpy as np
class A_ :
def __init__( self , _A , _A ):
'''simple docstring'''
if k in (0.04, 0.06):
UpperCAmelCase = k
UpperCAmelCase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ):
'''simple docstring'''
return str(self.k )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = cva.imread(_A , 0 )
UpperCAmelCase , UpperCAmelCase = img.shape
UpperCAmelCase = []
UpperCAmelCase = img.copy()
UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_GRAY2RGB )
UpperCAmelCase , UpperCAmelCase = np.gradient(_A )
UpperCAmelCase = dx**2
UpperCAmelCase = dy**2
UpperCAmelCase = dx * dy
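        # dx**2, dy**2 and dx*dy are per-pixel products of the image
        # gradients; their window sums (wxx, wyy, wxy below) are the entries
        # of the Harris structure tensor M, and the corner response is
        # r = det(M) - k * trace(M)**2. Note that the next line hardcodes
        # k = 0.04, shadowing the k validated in __init__.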
UpperCAmelCase = 0.04
UpperCAmelCase = self.window_size // 2
for y in range(_A , h - offset ):
for x in range(_A , w - offset ):
UpperCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase = (wxx * wyy) - (wxy**2)
UpperCAmelCase = wxx + wyy
UpperCAmelCase = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
__A : Tuple = HarrisCorner(0.04, 3)
__A , __A : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 273 | 1 |
__A : str = "Tobias Carryer"
from time import time
class A_ :
def __init__( self , _A , _A , _A , _A=int(time() ) ): # noqa: B008
'''simple docstring'''
UpperCAmelCase = multiplier
UpperCAmelCase = increment
UpperCAmelCase = modulo
UpperCAmelCase = seed
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
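# The generator follows the linear congruential recurrence
#   seed_{n+1} = (multiplier * seed_n + increment) mod modulo.
# The demo constants below (1664525, 1013904223, 2 << 31 == 2**32) match the
# widely cited Numerical Recipes LCG parameters.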
if __name__ == "__main__":
# Show the LCG in action.
__A : Optional[int] = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 273 |
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
'''simple docstring'''
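    # The downloader endpoint returns JSON; the first entry's first candidate
    # source URL is extracted below and its raw bytes are fetched and returned.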
UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(UpperCamelCase__ ).content
if __name__ == "__main__":
__A : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
__A : Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 273 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__A : List[str] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
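# make_batched normalizes every accepted input shape to List[List[image]]:
# a batch of videos passes through unchanged, a single video becomes a
# one-element batch, and a bare image becomes a one-video, one-frame batch.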
class A_ (a_ ):
UpperCAmelCase__ = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = True , _A = 1 / 2_5_5 , _A = True , _A = True , _A = None , _A = None , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
UpperCAmelCase = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCAmelCase = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A = None , **_A , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A = True , _A = None , **_A , ):
'''simple docstring'''
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(_A , scale=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A , _A , _A = None , **_A , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(_A )
if do_resize:
UpperCAmelCase = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
UpperCAmelCase = self.center_crop(_A , size=_A )
if do_rescale:
UpperCAmelCase = self.rescale(image=_A , scale=_A , offset=_A )
if do_normalize:
UpperCAmelCase = self.normalize(image=_A , mean=_A , std=_A )
UpperCAmelCase = to_channel_dimension_format(_A , _A )
return image
def _lowercase ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(_A )
UpperCAmelCase = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , offset=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 273 |
from __future__ import annotations
from collections.abc import Callable
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 100 , ) -> float:
'''simple docstring'''
UpperCAmelCase = x_start
UpperCAmelCase = fnc(UpperCamelCase__ )
UpperCAmelCase = 0.0
for _ in range(UpperCamelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase = (x_end - x_start) / steps + xa
UpperCAmelCase = fnc(UpperCamelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase = xa
UpperCAmelCase = fxa
return area
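# Each iteration adds one trapezoid area, |f(x1) + f(x2)| * (x2 - x1) / 2;
# for smooth integrands the total error shrinks on the order of 1/steps**2,
# which the demo below illustrates by raising the step count tenfold.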
if __name__ == "__main__":
    def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> float:
'''simple docstring'''
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
__A : List[Any] = 10
while i <= 100_000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
| 273 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ (a_ ):
UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ = '''LayoutLMv2ImageProcessor'''
UpperCAmelCase__ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _A , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_A , _A )
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=_A , return_tensors=_A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_A , _A ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features['''words''']
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_token_type_ids=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
# add pixel values
UpperCAmelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(_A , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase = images
return encoded_inputs
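    # When truncation overflows into extra chunks, each chunk must be paired
    # with the image of the sample it originated from; the helper below uses
    # overflow_to_sample_mapping to duplicate images accordingly.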
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_A ) != len(_A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(_A )} and {len(_A )}""" )
return images_with_overflow
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
def _lowercase ( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowercase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , )
return self.image_processor_class
@property
def _lowercase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , )
return self.image_processor
| 273 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Dict = logging.get_logger(__name__)
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Tuple = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = SqueezeBertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
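        # The serialized tokenizer.json may have been built with different
        # normalization options; if the arguments passed here disagree with
        # the stored normalizer state, rebuild the backend normalizer to match.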
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**_A )
UpperCAmelCase = do_lower_case
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 273 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A : int = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = list(s_dict.keys() )
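    # 1. Convert flat T5X layer names (``layers_<n>``) into the nested
    #    ``block/<n>/layer`` layout, and give the MLP and layer-norm entries
    #    their sub-layer index (1 in the encoder, 2 in the decoder).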
for key in keys:
UpperCAmelCase = R'''.*/layers_(\d+)'''
UpperCAmelCase = key
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , UpperCamelCase__ )
UpperCAmelCase = R'''(encoder|decoder)\/'''
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
if groups[0] == "encoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , UpperCamelCase__ )
elif groups[0] == "decoder":
UpperCAmelCase = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , UpperCamelCase__ )
UpperCAmelCase = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , UpperCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase = s_dict.pop(UpperCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase = s_dict[key].shape[0]
UpperCAmelCase = s_dict[key]
for idx in range(UpperCamelCase__ ):
UpperCAmelCase = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCamelCase__ )
return s_dict
__A : Optional[int] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = re.findall(R'''(.*) = ([0-9.]*)''' , UpperCamelCase__ )
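    # Every ``NAME = value`` line of the gin file is matched above; names known
    # to GIN_TO_CONFIG_MAPPING are translated into SwitchTransformersConfig
    # keyword arguments (values parsed as float when they contain a dot).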
UpperCAmelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase = float(UpperCamelCase__ ) if '''.''' in value else int(UpperCamelCase__ )
UpperCAmelCase = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , UpperCamelCase__ )[0]
UpperCAmelCase = str(activation[1] )
UpperCAmelCase = num_experts
UpperCAmelCase = SwitchTransformersConfig(**UpperCamelCase__ )
return config
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="./" , UpperCamelCase__=8 ) -> List[Any]:
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
if gin_file is not None:
UpperCAmelCase = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
else:
UpperCAmelCase = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
UpperCAmelCase = flax_params['''target''']
UpperCAmelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
UpperCAmelCase = rename_keys(UpperCamelCase__ )
UpperCAmelCase = unflatten_dict(UpperCamelCase__ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__A : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__A : List[Any] = NewType("DataClass", Any)
__A : Optional[Any] = NewType("DataClassType", Any)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Callable[[str], Any]:
'''simple docstring'''
UpperCAmelCase = {str(UpperCamelCase__ ): choice for choice in choices}
return lambda UpperCamelCase__ : str_to_choice.get(UpperCamelCase__ , UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( *,
UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = dataclasses.MISSING , UpperCamelCase__ = dataclasses.MISSING , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCAmelCase = {}
if aliases is not None:
UpperCAmelCase = aliases
if help is not None:
UpperCAmelCase = help
return dataclasses.field(metadata=UpperCamelCase__ , default=UpperCamelCase__ , default_factory=UpperCamelCase__ , **UpperCamelCase__ )
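# A hedged usage sketch for the field helper above (the dataclass and field
# names here are illustrative, not from this file): ``aliases`` and ``help``
# are folded into the field metadata so the argument parser can read them
# back later, roughly:
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         learning_rate: float = __SCREAMING_SNAKE_CASE(default=1e-3, help="step size")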
class A_ (a_ ):
UpperCAmelCase__ = 42
def __init__( self , _A , **_A ):
'''simple docstring'''
if "formatter_class" not in kwargs:
UpperCAmelCase = ArgumentDefaultsHelpFormatter
super().__init__(**_A )
if dataclasses.is_dataclass(_A ):
UpperCAmelCase = [dataclass_types]
UpperCAmelCase = list(_A )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_A )
@staticmethod
def _lowercase ( _A , _A ):
'''simple docstring'''
UpperCAmelCase = F"""--{field.name}"""
UpperCAmelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _A ):
raise RuntimeError(
'''Unresolved type detected; field types should have been resolved via '''
'''`typing.get_type_hints` before reaching this point.''' )
UpperCAmelCase = kwargs.pop('''aliases''' , [] )
if isinstance(_A , _A ):
UpperCAmelCase = [aliases]
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(_A , '''UnionType''' ) and isinstance(_A , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_A ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F""" Problem encountered in field '{field.name}'.""" )
if type(_A ) not in field.type.__args__:
# filter `str` in Union
UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCAmelCase = (
field.type.__args__[0] if isinstance(_A , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCAmelCase = {}
if origin_type is Literal or (isinstance(field.type , _A ) and issubclass(field.type , _A )):
if origin_type is Literal:
UpperCAmelCase = field.type.__args__
else:
UpperCAmelCase = [x.value for x in field.type]
UpperCAmelCase = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
else:
UpperCAmelCase = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCAmelCase = copy(_A )
# Hack because type=bool in argparse does not behave as we want.
UpperCAmelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# The default value is False when a bool field has no explicit default.
UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCAmelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCAmelCase = '''?'''
# This is the value that will get picked if we do --field_name (without value)
UpperCAmelCase = True
elif isclass(_A ) and issubclass(_A , _A ):
UpperCAmelCase = field.type.__args__[0]
UpperCAmelCase = '''+'''
if field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCAmelCase = True
else:
UpperCAmelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
else:
UpperCAmelCase = True
parser.add_argument(_A , *_A , **_A )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCAmelCase = False
parser.add_argument(F"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **_A )
def _lowercase ( self , _A ):
'''simple docstring'''
if hasattr(_A , '''_argument_group_name''' ):
UpperCAmelCase = self.add_argument_group(dtype._argument_group_name )
else:
UpperCAmelCase = self
try:
UpperCAmelCase = get_type_hints(_A )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_A ):
UpperCAmelCase = '''.'''.join(map(_A , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(_A ):
if not field.init:
continue
UpperCAmelCase = type_hints[field.name]
self._parse_dataclass_field(_A , _A )
def _lowercase ( self , _A=None , _A=False , _A=True , _A=None , _A=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCAmelCase = []
if args_filename:
args_files.append(Path(_A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCAmelCase = ArgumentParser()
args_file_parser.add_argument(_A , type=_A , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCAmelCase , UpperCAmelCase = args_file_parser.parse_known_args(args=_A )
UpperCAmelCase = vars(_A ).get(args_file_flag.lstrip('''-''' ) , _A )
if cmd_args_file_paths:
args_files.extend([Path(_A ) for p in cmd_args_file_paths] )
UpperCAmelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCAmelCase , UpperCAmelCase = self.parse_known_args(args=_A )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(_A ) if f.init}
UpperCAmelCase = {k: v for k, v in vars(_A ).items() if k in keys}
for k in keys:
delattr(_A , _A )
UpperCAmelCase = dtype(**_A )
outputs.append(_A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
UpperCAmelCase = set(args.keys() )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(_A ) if f.init}
UpperCAmelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCAmelCase = dtype(**_A )
outputs.append(_A )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(_A )}""" )
return tuple(_A )
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
with open(Path(_A ) , encoding='''utf-8''' ) as open_json_file:
UpperCAmelCase = json.loads(open_json_file.read() )
UpperCAmelCase = self.parse_dict(_A , allow_extra_keys=_A )
return tuple(_A )
def _lowercase ( self , _A , _A = False ):
'''simple docstring'''
UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(_A ).read_text() ) , allow_extra_keys=_A )
return tuple(_A )
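The class above mirrors transformers' `HfArgumentParser`. A typical use, assuming that conventional name and the conventional `parse_args_into_dataclasses` entry point for the parsing method defined above:
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class TrainConfig:
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
    do_train: bool = False
    run_name: Optional[str] = None

parser = HfArgumentParser(TrainConfig)
(cfg,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-4", "--do_train"])
print(cfg.learning_rate, cfg.do_train)  # 0.0003 True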
| 273 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
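Both tests end with the same numerical criterion: the largest element-wise gap between the original pipeline's output and the reloaded pipeline's output must stay below 1e-4. A self-contained illustration of that check:
import numpy as np

original = np.array([0.10, 0.20, 0.30])
reloaded = np.array([0.10, 0.20005, 0.30])
max_diff = np.abs(original - reloaded).max()
assert max_diff < 1e-4  # the tolerance used by the tests above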
| 273 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = "▁"
__A : Optional[int] = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
__A : Dict = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
__A : Union[str, Any] = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
__A : Union[str, Any] = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self , _A , _A , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<pad>" , _A="<unk>" , _A="m2m100" , _A = None , _A=8 , **_A , ):
'''simple docstring'''
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = language_codes
UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_A )
for lang_code in fairseq_language_code
if self.get_lang_token(_A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_A , tgt_lang=_A , bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , language_codes=_A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_A , **_A , )
UpperCAmelCase = vocab_file
UpperCAmelCase = load_json(_A )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
UpperCAmelCase = spm_file
UpperCAmelCase = load_spm(_A , self.sp_model_kwargs )
UpperCAmelCase = len(self.encoder )
UpperCAmelCase = {
self.get_lang_token(_A ): self.encoder_size + i for i, lang_code in enumerate(_A )
}
UpperCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_A )}
UpperCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en'''
UpperCAmelCase = tgt_lang
UpperCAmelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase = num_madeup_words
@property
def _lowercase ( self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowercase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , _A ):
'''simple docstring'''
return self.sp_model.encode(_A , out_type=_A )
def _lowercase ( self , _A ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_A , self.encoder[self.unk_token] )
def _lowercase ( self , _A ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_A , self.unk_token )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_A ) + token
UpperCAmelCase = []
else:
current_sub_tokens.append(_A )
out_string += self.sp_model.decode(_A )
return out_string.strip()
def _lowercase ( self , _A , _A = None , _A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self , _A ):
'''simple docstring'''
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = Path(_A )
if not save_dir.is_dir():
raise OSError(F"""{save_directory} should be a directory""" )
UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _A )
if os.path.abspath(self.spm_file ) != os.path.abspath(_A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _A )
elif not os.path.isfile(self.spm_file ):
with open(_A , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (str(_A ), str(_A ))
def _lowercase ( self , _A , _A = "en" , _A = None , _A = "ro" , **_A , ):
'''simple docstring'''
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_A , _A , **_A )
def _lowercase ( self , _A , _A , _A , **_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(_A , add_special_tokens=_A , **_A )
UpperCAmelCase = self.get_lang_id(_A )
UpperCAmelCase = tgt_lang_id
return inputs
def _lowercase ( self ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(_A )
UpperCAmelCase = self.lang_token_to_id[lang_token]
UpperCAmelCase = [self.cur_lang_id]
UpperCAmelCase = [self.eos_token_id]
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(_A )
UpperCAmelCase = self.lang_token_to_id[lang_token]
UpperCAmelCase = [self.cur_lang_id]
UpperCAmelCase = [self.eos_token_id]
def _lowercase ( self , _A ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = self.get_lang_token(_A )
return self.lang_token_to_id[lang_token]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
UpperCAmelCase = sentencepiece.SentencePieceProcessor(**UpperCamelCase__ )
spm.Load(str(UpperCamelCase__ ) )
return spm
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[Dict, List]:
'''simple docstring'''
with open(UpperCamelCase__ , '''r''' ) as f:
return json.load(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> None:
'''simple docstring'''
with open(UpperCamelCase__ , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=2 )
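A typical round trip with the tokenizer defined above, assuming the released name `M2M100Tokenizer` and access to the Hugging Face Hub for the pretrained files:
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
batch = tokenizer("Hello world", return_tensors="pt")
# Generation with a multilingual model is typically forced to start with the target language id:
forced_bos_token_id = tokenizer.get_lang_id("fr")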
| 273 |
from __future__ import annotations
from collections import namedtuple
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> tuple:
'''simple docstring'''
UpperCAmelCase = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Exactly one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
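Illustrative calls, assuming the conventional name `electric_power` and keyword signature for the function above; exactly one of the three quantities is zero and gets solved for:
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=4, power=0))  # result(name='power', value=8.0)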
| 273 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Optional[Any] = logging.get_logger(__name__)
class A_ (a_ ):
UpperCAmelCase__ = ['''pixel_values''']
def __init__( self , _A = True , _A = 3_2 , _A=PILImageResampling.BILINEAR , _A = True , **_A , ):
'''simple docstring'''
UpperCAmelCase = do_resize
UpperCAmelCase = do_rescale
UpperCAmelCase = size_divisor
UpperCAmelCase = resample
super().__init__(**_A )
def _lowercase ( self , _A , _A , _A , _A = None , **_A ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = get_image_size(_A )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCAmelCase = height // size_divisor * size_divisor
UpperCAmelCase = width // size_divisor * size_divisor
UpperCAmelCase = resize(_A , (new_h, new_w) , resample=_A , data_format=_A , **_A )
return image
def _lowercase ( self , _A , _A , _A = None , **_A ):
'''simple docstring'''
return rescale(image=_A , scale=_A , data_format=_A , **_A )
def _lowercase ( self , _A , _A = None , _A = None , _A=None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = size_divisor if size_divisor is not None else self.size_divisor
UpperCAmelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
UpperCAmelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(_A ) for img in images]
if do_resize:
UpperCAmelCase = [self.resize(_A , size_divisor=_A , resample=_A ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(_A , scale=1 / 2_5_5 ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
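The resize step rounds each spatial dimension down to the nearest multiple of `size_divisor`, which keeps feature maps divisible through the network's downsampling stages. The arithmetic, worked on example dimensions:
height, width, size_divisor = 537, 641, 32
new_h = height // size_divisor * size_divisor  # 512
new_w = width // size_divisor * size_divisor   # 640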
| 273 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
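The `_LazyModule` indirection defers the heavy framework imports until an exported name is first touched. Below is a minimal hand-rolled equivalent using a module-level `__getattr__` (PEP 562); this is a sketch, not the transformers implementation, and it assumes it lives in the package's `__init__.py`:
import importlib

_import_structure = {"tokenization_roformer": ["RoFormerTokenizer"]}

def __getattr__(name):
    # Import the owning submodule only when one of its exports is first requested.
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")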
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 100 ) -> int:
'''simple docstring'''
UpperCAmelCase = n * (n + 1) * (2 * n + 1) / 6
UpperCAmelCase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
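A worked check with the classic n = 10 instance: the sum of squares is 10*11*21/6 = 385, the square of the sum is 55**2 = 3025, and their difference is 2640.
n = 10
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 385
square_of_sum = (n * (n + 1) // 2) ** 2          # 3025
assert square_of_sum - sum_of_squares == 2640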
| 273 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
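An illustrative trace through the key renamer above, assuming it is exposed under the conventional name `rename_key`; the sample checkpoint key is hypothetical:
print(rename_key("model.transformer_0.mha.W_q.weight"))
# "model."        -> stripped
# "transformer_0" -> "encoder.layer.0"
# "mha"           -> "attention"
# "W_q"           -> "self.query"
# result: yoso.encoder.layer.0.attention.self.query.weight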
| 273 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A : Tuple = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ["CLIPFeatureExtractor"]
__A : List[str] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 1777 , UpperCamelCase__ = 1855 , UpperCamelCase__ = 8 ) -> int:
'''simple docstring'''
UpperCAmelCase = base
for _ in range(1 , UpperCamelCase__ ):
UpperCAmelCase = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
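The helper above is binary (square-and-multiply) modular exponentiation, and `solution` iterates it to build the last `digits` digits of the power tower base^(base^(...)); this looks like Project Euler 188 with base 1777 and height 1855. A quick sanity check against Python's built-in three-argument `pow`, assuming the helper's conventional name `_modexpt`:
assert _modexpt(3, 13, 1000) == pow(3, 13, 1000) == 323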
| 273 | 1 |