| code (string, 81-54k chars) | code_codestyle (int, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int, 0-699) | label (int, 0-1) |
|---|---|---|---|---|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the type references below resolve when vision is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
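# Note (added for clarity): hashimage reduces a PIL image to a stable md5
# fingerprint so the slow test below can compare depth maps as short strings.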
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ], outputs, )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT.")
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
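# Illustrative note (not part of the original script): MAPPING translates fairseq
# parameter names to HF names, with "*" standing for the layer index, e.g.
#   fairseq key:  encoder.layers.3.ffn1.w_1.weight
#   matched rule: ffn1.w_1 -> encoder.layers.*.ffn1.intermediate_dense
#   HF target:    wav2vec2_conformer.encoder.layers.3.ffn1.intermediate_dense (weight_type="weight")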
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
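# Example (added for illustration): set_recursively(hf_model,
# "wav2vec2_conformer.encoder.layer_norm", value, full_name, "weight") walks the
# dotted path attribute by attribute and copies `value` into .weight.data after
# the shape check above.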
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
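# Example invocation (illustrative; the script name and all paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --dict_path /path/to/dict.ltr.txt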
| 77 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audios = output.audios
        audio_slice = audios[0, -3:, -3:]
        assert audios.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audios = output.audios
        audio_slice = audios[0, -3:, -3:]
        assert audios.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audios = output.audios
        audio_slice = audios[0, -3:, -3:]
        assert audios.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 714 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
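# Minimal sketch (assumed, not from the original file) of a concrete subclass of
# the abstract command above:
#
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         env_parser = parser.add_parser("env")
#         env_parser.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print("collect and print environment info here")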
| 77 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a list of PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
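# Note (added for clarity): with _LazyModule installed in sys.modules, importing
# names like UniSpeechModel from this package only triggers the heavy torch
# import on first attribute access, keeping the top-level import fast.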
| 77 | 0 |
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self):
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
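# Usage sketch (assumed, not from the original file):
#   root = Node(10)
#   root.left, root.right = Node(5), Node(-3)
#   print(next(iter(BinaryTreeNodeSum(root))))  # 12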
| 716 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 77 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
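# Note (added for clarity): add_hook_to_module wraps module.forward so that
# hook.pre_forward runs on the inputs and hook.post_forward on the output;
# the two hooks above shift inputs/outputs by +1 so the tests can observe them.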
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_in_hook(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_in_hook_with_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 717 |
from __future__ import annotations
import math
def default_matrix_multiplication(a, b) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a, matrix_b) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a, matrix_b) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix) -> None:
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
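# Note (added for clarity): t1..t7 above are Strassen's seven products; using 7
# recursive multiplications instead of the naive 8 gives the recurrence
# T(n) = 7 T(n/2) + O(n^2), i.e. O(n^log2(7)) ~= O(n^2.81) instead of O(n^3).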
def strassen(matrix1, matrix2) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 77 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
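# Usage sketch (assumed; requires the sentencepiece model from the hub):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("Summarize this article.").input_ids  # last id is eos_token_id (1)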
| 718 |
def infix_2_postfix(infix) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
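# Worked example (added for illustration):
#   infix_2_postfix("a+b*c") -> "abc*+"
#   infix_2_prefix("a+b*c")  -> "+a*bc"  (reverse, convert to postfix, reverse back)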
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 77 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take as much space as the whole state_dict).
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.')
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 719 |
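To make the key renaming above concrete, here is a minimal sketch applying convert_state_dict to a toy state dict (keys follow the RWKV naming handled above; shapes are arbitrary placeholders):
import torch
toy = {
    'emb.weight': torch.zeros(4, 2),
    'blocks.0.ln0.weight': torch.zeros(2),
    'blocks.3.att.time_mix_k': torch.zeros(1, 1, 2),
    'blocks.3.ffn.key.weight': torch.zeros(2, 2),
    'head.weight': torch.zeros(4, 2),
}
renamed = convert_state_dict(toy)
print(sorted(renamed))
# ['head.weight', 'rwkv.blocks.0.pre_ln.weight', 'rwkv.blocks.3.attention.time_mix_key',
#  'rwkv.blocks.3.feed_forward.key.weight', 'rwkv.embeddings.weight']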
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''mask2former'''
UpperCamelCase : Any = ['''swin''']
UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , _A = None , _A = 256 , _A = 256 , _A = 256 , _A = 1024 , _A = "relu" , _A = 6 , _A = 10 , _A = 8 , _A = 0.0 , _A = 2048 , _A = False , _A = False , _A = 4 , _A = 255 , _A = 100 , _A = 0.1 , _A = 2.0 , _A = 5.0 , _A = 5.0 , _A = 12544 , _A = 3.0 , _A = 0.7_5 , _A = 0.0_2 , _A = 1.0 , _A = True , _A = [4, 8, 16, 32] , _A = None , **_A , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__A : Optional[int] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Dict = backbone_config.pop('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
__A : Optional[int] = backbone_config
__A : Optional[Any] = feature_size
__A : Any = mask_feature_size
__A : Optional[Any] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : Optional[Any] = activation_function
__A : List[Any] = encoder_layers
__A : Union[str, Any] = decoder_layers
__A : Dict = num_attention_heads
__A : Tuple = dropout
__A : Dict = dim_feedforward
__A : Tuple = pre_norm
__A : Dict = enforce_input_projection
__A : Optional[int] = common_stride
__A : Optional[Any] = ignore_value
__A : str = num_queries
__A : List[Any] = no_object_weight
__A : List[str] = class_weight
__A : List[Any] = mask_weight
__A : List[Any] = dice_weight
__A : Tuple = train_num_points
__A : Optional[Any] = oversample_ratio
__A : Union[str, Any] = importance_sample_ratio
__A : Union[str, Any] = init_std
__A : int = init_xavier_std
__A : Union[str, Any] = use_auxiliary_loss
__A : Union[str, Any] = feature_strides
__A : List[Any] = output_auxiliary_logits
__A : Optional[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
return cls(
backbone_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = copy.deepcopy(self.__dict__ )
__A : List[Any] = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
| 77 | 0 |
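The to_dict() override above follows a common nested-config serialization pattern: deep-copy the instance dict, replace the nested config object with its own dict, and stamp the model type. A minimal, self-contained sketch of the same pattern with toy classes (all names here are illustrative, no transformers dependency):
import copy
class ToyBackboneConfig:
    model_type = 'swin'
    def __init__(self, depth=4):
        self.depth = depth
    def to_dict(self):
        return {'model_type': self.model_type, 'depth': self.depth}
class ToyConfig:
    model_type = 'mask2former'
    def __init__(self, backbone_config, hidden_dim=256):
        self.backbone_config = backbone_config
        self.hidden_dim = hidden_dim
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()  # nest as a plain dict
        output['model_type'] = self.__class__.model_type
        return output
print(ToyConfig(ToyBackboneConfig()).to_dict())
# {'backbone_config': {'model_type': 'swin', 'depth': 4}, 'hidden_dim': 256, 'model_type': 'mask2former'}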
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['bert-base-uncased', 'bert-base-cased']
TINY_MODEL_CHECKPOINT = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out['pooler_output']
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='tf', padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 720 |
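The graph-mode test above compares a tf.function-compiled callable against its eager counterpart. A minimal sketch of the same check on a plain function (assumes TensorFlow is installed; the function is a stand-in for the tokenizer):
import tensorflow as tf
def double(x):
    return x * 2
compiled_double = tf.function(double)
x = tf.constant([1, 2, 3])
assert bool(tf.reduce_all(double(x) == compiled_double(x)))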
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''conditional_detr'''
UpperCamelCase : int = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _A=True , _A=None , _A=3 , _A=300 , _A=6 , _A=2048 , _A=8 , _A=6 , _A=2048 , _A=8 , _A=0.0 , _A=0.0 , _A=True , _A="relu" , _A=256 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1.0 , _A=False , _A="sine" , _A="resnet50" , _A=True , _A=False , _A=2 , _A=5 , _A=2 , _A=1 , _A=1 , _A=2 , _A=5 , _A=2 , _A=0.2_5 , **_A , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__A : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__A : Tuple = backbone_config.get('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[Any] = config_class.from_dict(_A )
__A : Tuple = use_timm_backbone
__A : List[str] = backbone_config
__A : Dict = num_channels
__A : int = num_queries
__A : int = d_model
__A : str = encoder_ffn_dim
__A : List[str] = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Union[str, Any] = decoder_ffn_dim
__A : List[Any] = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : Any = dropout
__A : Any = attention_dropout
__A : int = activation_dropout
__A : Optional[int] = activation_function
__A : Union[str, Any] = init_std
__A : Union[str, Any] = init_xavier_std
__A : Optional[Any] = encoder_layerdrop
__A : int = decoder_layerdrop
__A : List[str] = encoder_layers
__A : str = auxiliary_loss
__A : Union[str, Any] = position_embedding_type
__A : Optional[int] = backbone
__A : List[str] = use_pretrained_backbone
__A : List[Any] = dilation
# Hungarian matcher
__A : List[str] = class_cost
__A : Optional[int] = bbox_cost
__A : Dict = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Union[str, Any] = dice_loss_coefficient
__A : List[Any] = cls_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
__A : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Dict = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-5
@property
def UpperCAmelCase_ ( self ):
return 12
| 77 | 0 |
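The `inputs` property above declares which ONNX input axes are dynamic. As a rough illustration, this is the shape of mapping that torch.onnx.export consumes; the model, file name, and dummy shape here are placeholders, not the actual DETR export path:
import torch
model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy = torch.randn(1, 3, 64, 64)
torch.onnx.export(
    model,
    dummy,
    'toy.onnx',
    input_names=['pixel_values'],
    dynamic_axes={'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}},
)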
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _SCREAMING_SNAKE_CASE ( a , a ) -> Tuple:
__A : List[Any] = checkpoint
__A : Any = {}
__A : Any = vae_state_dict['encoder.conv_in.weight']
__A : Any = vae_state_dict['encoder.conv_in.bias']
__A : Dict = vae_state_dict['encoder.conv_out.weight']
__A : List[str] = vae_state_dict['encoder.conv_out.bias']
__A : Dict = vae_state_dict['encoder.norm_out.weight']
__A : Union[str, Any] = vae_state_dict['encoder.norm_out.bias']
__A : str = vae_state_dict['decoder.conv_in.weight']
__A : int = vae_state_dict['decoder.conv_in.bias']
__A : Dict = vae_state_dict['decoder.conv_out.weight']
__A : Tuple = vae_state_dict['decoder.conv_out.bias']
__A : List[Any] = vae_state_dict['decoder.norm_out.weight']
__A : Optional[int] = vae_state_dict['decoder.norm_out.bias']
__A : Tuple = vae_state_dict['quant_conv.weight']
__A : Tuple = vae_state_dict['quant_conv.bias']
__A : int = vae_state_dict['post_quant_conv.weight']
__A : Optional[Any] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
__A : Dict = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
__A : str = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(a )
}
# Retrieves the keys for the decoder up blocks only
__A : Tuple = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
__A : Tuple = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(a )
}
for i in range(a ):
__A : int = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
__A : str = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
__A : Tuple = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
__A : Dict = renew_vae_resnet_paths(a )
__A : List[str] = {'old': F"""down.{i}.block""", 'new': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__A : Union[str, Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
__A : Optional[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__A : Optional[Any] = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
__A : Optional[Any] = renew_vae_resnet_paths(a )
__A : Union[str, Any] = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__A : str = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
__A : str = renew_vae_attention_paths(a )
__A : str = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
for i in range(a ):
__A : Union[str, Any] = num_up_blocks - 1 - i
__A : Tuple = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
__A : Optional[int] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
__A : Optional[int] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
__A : Union[str, Any] = renew_vae_resnet_paths(a )
__A : int = {'old': F"""up.{block_id}.block""", 'new': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__A : Dict = [key for key in vae_state_dict if 'decoder.mid.block' in key]
__A : Optional[int] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__A : List[Any] = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
__A : int = renew_vae_resnet_paths(a )
__A : List[Any] = {'old': F"""mid.block_{i}""", 'new': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
__A : Tuple = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
__A : Tuple = renew_vae_attention_paths(a )
__A : Any = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
return new_checkpoint
def _SCREAMING_SNAKE_CASE ( a , a , ) -> Optional[Any]:
# Only support V1
__A : List[Any] = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
__A : List[Any] = io.BytesIO(r.content )
__A : int = OmegaConf.load(a )
__A : Optional[Any] = 5_12
__A : List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
__A : Union[str, Any] = {}
with safe_open(a , framework='pt' , device='cpu' ) as f:
for key in f.keys():
__A : int = f.get_tensor(a )
else:
__A : Optional[int] = torch.load(a , map_location=a )['state_dict']
# Convert the VAE model.
__A : List[Any] = create_vae_diffusers_config(a , image_size=a )
__A : List[Any] = custom_convert_ldm_vae_checkpoint(a , a )
__A : List[str] = AutoencoderKL(**a )
vae.load_state_dict(a )
vae.save_pretrained(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE model.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 721 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, 'index.json')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, 'weight', tmp_dir, {})
                weight_file = os.path.join(tmp_dir, 'weight.dat')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {'weight': {'shape': [2, 3], 'dtype': str(dtype).split('.')[1]}})
                new_weight = load_offloaded_weight(weight_file, index['weight'])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1': 0, 'a.2': 2})
        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1.a': 0, 'a.2.a': 2})
| 77 | 0 |
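A rough sketch of the idea behind offload_weight/load_offloaded_weight, not accelerate's actual implementation: dump the raw tensor bytes to a .dat file, record shape and dtype in an index, and memory-map the file back on load:
import numpy as np
import torch
weight = torch.randn(2, 3)
array = weight.numpy()
array.tofile('weight.dat')  # raw bytes, no header
index = {'weight': {'shape': list(array.shape), 'dtype': str(array.dtype)}}
meta = index['weight']
loaded = np.memmap('weight.dat', dtype=meta['dtype'], shape=tuple(meta['shape']), mode='r')
assert torch.equal(weight, torch.from_numpy(np.array(loaded)))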
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
__A : Any = data
def __iter__( self ):
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE ( a=True ) -> Any:
__A : List[Any] = Accelerator(even_batches=a )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a , a = False ) -> str:
if iterable:
__A : int = DummyIterableDataset(torch.as_tensor(range(a ) ) )
else:
__A : Optional[Any] = TensorDataset(torch.as_tensor(range(a ) ) )
__A : Optional[Any] = DataLoader(a , batch_size=a )
__A : Optional[int] = accelerator.prepare(a )
return dl
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a , ) -> Union[str, Any]:
__A : Optional[int] = create_dataloader(accelerator=a , dataset_size=a , batch_size=a )
__A : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : str = create_accelerator(even_batches=a )
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : Optional[Any] = create_accelerator(even_batches=a )
__A : str = torch.nn.Linear(1 , 1 )
__A : Optional[int] = accelerator.prepare(a )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a ):
__A : Dict = ddp_model(batch[0].float() )
__A : List[str] = output.sum()
loss.backward()
batch_idxs.append(a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : int = True
__A : Union[str, Any] = False
__A : Optional[int] = create_accelerator(even_batches=a )
__A : int = torch.nn.Linear(1 , 1 )
__A : List[Any] = accelerator.prepare(a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : List[str] = train_dl.batch_sampler.even_batches
__A : Dict = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Any = True
__A : List[Any] = False
__A : Tuple = create_accelerator(even_batches=a )
__A : List[str] = torch.nn.Linear(1 , 1 )
__A : Optional[Any] = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : Tuple = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Any = create_accelerator()
__A : Union[str, Any] = torch.nn.Linear(1 , 1 )
__A : str = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
__A : int = accelerator.state.distributed_type
__A : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a )
__A : str = original_state
if __name__ == "__main__":
main()
| 77 | 0 |
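A back-of-the-envelope model of the batch counts asserted in the tests above (plain Python; accelerate's real sampler interleaves samples across processes, but the per-process counts come out the same):
def expected_batch_sizes(dataset_size, batch_size, num_procs, even_batches):
    if even_batches:
        # shorter shards are padded up to ceil(dataset_size / num_procs)
        shard_sizes = [-(-dataset_size // num_procs)] * num_procs
    else:
        base, extra = divmod(dataset_size, num_procs)
        shard_sizes = [base + (1 if rank < extra else 0) for rank in range(num_procs)]
    return [
        [batch_size] * (n // batch_size) + ([n % batch_size] if n % batch_size else [])
        for n in shard_sizes
    ]
assert expected_batch_sizes(7, 2, 2, even_batches=True) == [[2, 2], [2, 2]]
assert expected_batch_sizes(7, 2, 2, even_batches=False) == [[2, 2], [2, 1]]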
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : str = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ['''input_features''', '''is_longer''']
def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ):
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
__A : int = top_db
__A : Optional[Any] = truncation
__A : str = padding
__A : int = fft_window_size
__A : Any = (fft_window_size >> 1) + 1
__A : List[str] = hop_length
__A : List[str] = max_length_s
__A : List[Any] = max_length_s * sampling_rate
__A : str = sampling_rate
__A : List[Any] = frequency_min
__A : Any = frequency_max
__A : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='htk' , )
__A : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='slaney' , mel_scale='slaney' , )
def UpperCAmelCase_ ( self ):
__A : List[Any] = copy.deepcopy(self.__dict__ )
__A : str = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : List[str] = spectrogram(
_A , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__A : Tuple = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__A : Union[str, Any] = [0]
# randomly choose index for each part
__A : Union[str, Any] = np.random.choice(ranges[0] )
__A : Union[str, Any] = np.random.choice(ranges[1] )
__A : Tuple = np.random.choice(ranges[2] )
__A : Any = mel[idx_front : idx_front + chunk_frames, :]
__A : Union[str, Any] = mel[idx_middle : idx_middle + chunk_frames, :]
__A : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
__A : Union[str, Any] = torch.tensor(mel[None, None, :] )
__A : Any = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_A )
__A : List[Any] = mel_shrink[0][0].numpy()
__A : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self , _A , _A , _A , _A ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__A : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__A : int = len(_A ) - max_length
__A : Tuple = np.random.randint(0 , overflow + 1 )
__A : int = waveform[idx : idx + max_length]
__A : Union[str, Any] = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__A : Tuple = self._np_extract_fbank_features(_A , self.mel_filters )
__A : Union[str, Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__A : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__A : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
__A : Optional[Any] = False
else:
__A : Any = self._random_mel_fusion(_A , _A , _A )
__A : List[str] = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
__A : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__A : Optional[int] = int(max_length / len(_A ) )
__A : Optional[Any] = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__A : List[str] = int(max_length / len(_A ) )
__A : List[Any] = np.stack(np.tile(_A , _A ) )
__A : Dict = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
__A : Dict = self._np_extract_fbank_features(_A , self.mel_filters )
__A : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__A : Any = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ):
__A : Dict = truncation if truncation is not None else self.truncation
__A : Union[str, Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__A : Tuple = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
__A : Tuple = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__A : List[Any] = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__A : str = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__A : str = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
__A : Optional[Any] = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
__A : int = []
__A : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__A : Optional[int] = np.random.randint(0 , len(_A ) )
__A : Union[str, Any] = True
if isinstance(input_mel[0] , _A ):
__A : Optional[int] = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__A : Optional[Any] = [[longer] for longer in is_longer]
__A : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
__A : Dict = BatchFeature(_A )
if return_tensors is not None:
__A : Optional[Any] = input_features.convert_to_tensors(_A )
return input_features
| 701 |
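A minimal numpy sketch of the "repeatpad" branch above: tile the waveform as many whole times as fits, then zero-pad the remainder up to max_length (values here are arbitrary):
import numpy as np
waveform = np.arange(5, dtype=np.float32)
max_length = 12
n_repeat = int(max_length / len(waveform))        # 2
padded = np.tile(waveform, n_repeat)              # length 10
padded = np.pad(padded, (0, max_length - padded.shape[0]), mode='constant', constant_values=0)
print(padded)  # [0. 1. 2. 3. 4. 0. 1. 2. 3. 4. 0. 0.]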
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
| 77 | 0 |
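A small sketch of the dummy past_key_values construction in generate_dummy_inputs above: one (key, value) pair of zeros per layer, each shaped [batch, num_heads, past_length, head_dim] (the sizes here are arbitrary):
import torch
batch, num_heads, past_len, head_dim, num_layers = 2, 16, 9, 64, 4
past_shape = (batch, num_heads, past_len, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
print(len(past_key_values), past_key_values[0][0].shape)  # 4 torch.Size([2, 16, 9, 64])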
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
UpperCAmelCase : Any = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : List[str] = torch.load(a , map_location='cpu' )
return sd
def _SCREAMING_SNAKE_CASE ( a , a , a=rename_keys_prefix ) -> int:
__A : Any = OrderedDict()
__A : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__A : List[Any] = key
for name_pair in rename_keys_prefix:
__A : Union[str, Any] = new_key.replace(name_pair[0] , name_pair[1] )
__A : str = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
__A : Union[str, Any] = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
__A : List[Any] = 'pretraining'
if "vcr" in checkpoint_path:
__A : List[Any] = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
__A : Optional[Any] = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
__A : List[str] = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
__A : Optional[int] = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
__A : Optional[Any] = {'visual_embedding_dim': 5_12}
__A : int = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
__A : Dict = {'visual_embedding_dim': 20_48}
__A : int = 'vqa_advanced'
elif "vqa" in checkpoint_path:
__A : Any = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
__A : Any = 'vqa'
elif "nlvr" in checkpoint_path:
__A : Any = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
__A : List[str] = 'nlvr'
__A : Dict = VisualBertConfig(**a )
# Load State Dict
__A : Tuple = load_state_dict(a )
__A : Union[str, Any] = get_new_dict(a , a )
if model_type == "pretraining":
__A : Optional[int] = VisualBertForPreTraining(a )
elif model_type == "vqa":
__A : Any = VisualBertForQuestionAnswering(a )
elif model_type == "nlvr":
__A : Tuple = VisualBertForVisualReasoning(a )
elif model_type == "multichoice":
__A : Union[str, Any] = VisualBertForMultipleChoice(a )
model.load_state_dict(a )
# Save Checkpoints
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase : Dict = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 702 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*_A , **_A )
| 77 | 0 |
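The shim above is the standard deprecation pattern: subclass the replacement and warn on construction. A self-contained sketch with illustrative names:
import warnings
class NewProcessor:
    def __init__(self, size=224):
        self.size = size
class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'OldFeatureExtractor is deprecated; use NewProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)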
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 703 |
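Quick checks of the restored partition() above against known values of the integer partition function (5 has seven partitions: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1):
assert partition(5) == 7
assert partition(7) == 15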
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print('Processing...')
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE: write next to OUTPUT_DIR rather than to the filesystem root
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 77 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase : Union[str, Any] = {
'''gpt-neox-20b''': 20_48,
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , _A=None , _A=None , _A=None , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|endoftext|>" , _A=False , **_A , ):
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , )
__A : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _A ) != add_prefix_space:
__A : List[Any] = getattr(_A , pre_tok_state.pop('type' ) )
__A : Union[str, Any] = add_prefix_space
__A : Optional[Any] = pre_tok_class(**_A )
__A : str = add_prefix_space
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def UpperCAmelCase_ ( self , _A ):
__A : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
__A : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
| 704 |
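A tiny sketch of the conversation flattening in the last method above: encode each turn, append the EOS id, and keep only the most recent model_max_length tokens (the ids here are made up):
eos_token_id = 0
model_max_length = 8
encoded_turns = [[11, 12, 13], [21, 22], [31, 32, 33, 34]]
input_ids = []
for turn in encoded_turns:
    input_ids.extend(turn + [eos_token_id])
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
print(input_ids)  # [21, 22, 0, 31, 32, 33, 34, 0]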
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Union[str, Any] = parent
__A : List[str] = batch_size
__A : Optional[int] = seq_length
__A : List[Any] = is_training
__A : Optional[Any] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : Optional[Any] = use_labels
__A : List[str] = vocab_size
__A : Optional[int] = hidden_size
__A : List[Any] = num_hidden_layers
__A : int = num_attention_heads
__A : Dict = intermediate_size
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Dict = type_vocab_size
__A : Any = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : Union[str, Any] = num_choices
__A : str = scope
def UpperCAmelCase_ ( self ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : List[Any] = None
__A : List[Any] = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[str] = LlamaModel(config=_A )
model.to(_A )
model.eval()
__A : Any = model(_A , attention_mask=_A )
__A : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
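# What the check above verifies (illustrative): decoding the 3 appended
# tokens with cached past_key_values must reproduce the hidden states of a
# full forward pass over the concatenated sequence, compared on a random
# slice with atol=1e-3.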
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
__A , __A , __A , __A , __A , __A , __A : Tuple = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : Dict = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = LlamaModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Optional[int] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : Tuple = 'single_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : int = 'multi_label_classification'
__A : int = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : List[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase_ ( self , _A ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = ids_tensor([1, 10] , config.vocab_size )
__A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = LlamaModel(_A )
original_model.to(_A )
original_model.eval()
__A : Dict = original_model(_A ).last_hidden_state
__A : int = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : int = {'type': scaling_type, 'factor': 1_0.0}
__A : str = LlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A : Dict = scaled_model(_A ).last_hidden_state
__A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
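# Expected pattern encoded by the branches above (added for clarity):
# dynamic RoPE scaling only activates beyond the original
# max_position_embeddings, so the short input must still match the unscaled
# model, while linear scaling rescales every position immediately:
#     dynamic: allclose(short) is True,  allclose(long) is False
#     linear:  allclose(short) is False, allclose(long) is False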
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Model is currently gated' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a = 10**12 ) -> int:
__A : Union[str, Any] = 1
__A : Optional[int] = 0
__A : int = 1
__A : Optional[int] = 1
    while numerator <= 2 * a - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 705 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase : str = HfApi()
UpperCAmelCase : List[str] = {}
# fmt: off
UpperCAmelCase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : Tuple = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCAmelCase : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase : Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 77 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Dict:
__A : Tuple = multiprocessing.Manager()
__A : int = manager.list()
__A : Dict = multiprocessing.Process(target=a , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Dict:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
__A : Any = shutil.rmtree
__A : Tuple = os.rmdir
__A : Tuple = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
__A : Tuple = {}
with swallow_io():
with time_limit(a ):
exec(a , a )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
__A : Dict = rmtree
__A : str = rmdir
__A : Optional[int] = chdir
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( a ) -> Union[str, Any]:
def signal_handler(a , a ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , a )
signal.signal(signal.SIGALRM , a )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
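# How the guard above works (illustrative; the upstream OpenAI release names
# this context manager `time_limit`): signal.setitimer arms a real-time timer
# that delivers SIGALRM after the given number of seconds, the handler turns
# it into TimeoutException, and the finally-block disarms the timer
# (interval 0) so it cannot fire after the block has exited. Typical use:
#     with time_limit(3.0):
#         exec(check_program, exec_globals)  # interrupted after ~3 seconds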
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> Any:
__A : Any = WriteOnlyStringIO()
with contextlib.redirect_stdout(a ):
with contextlib.redirect_stderr(a ):
with redirect_stdin(a ):
yield
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> str:
with tempfile.TemporaryDirectory() as dirname:
with chdir(a ):
yield dirname
class _A( snake_case__ ):
"""simple docstring"""
pass
class _A( io.StringIO ):
"""simple docstring"""
def UpperCAmelCase_ ( self , *_A , **_A ):
raise OSError
def UpperCAmelCase_ ( self , *_A , **_A ):
raise OSError
def UpperCAmelCase_ ( self , *_A , **_A ):
raise OSError
def UpperCAmelCase_ ( self , *_A , **_A ):
return False
class _A( contextlib._RedirectStream ): # type: ignore
"""simple docstring"""
UpperCamelCase : Any = '''stdin'''
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
if root == ".":
yield
return
__A : str = os.getcwd()
os.chdir(a )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(a )
def _SCREAMING_SNAKE_CASE ( a=None ) -> List[Any]:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
__A : List[Any] = None
__A : Tuple = None
import os
__A : int = '1'
__A : List[str] = None
__A : Union[str, Any] = None
__A : Dict = None
__A : str = None
__A : str = None
__A : Optional[int] = None
__A : Dict = None
__A : int = None
__A : Tuple = None
__A : Optional[Any] = None
__A : Optional[int] = None
__A : int = None
__A : str = None
__A : Tuple = None
__A : List[Any] = None
__A : Union[str, Any] = None
__A : List[str] = None
__A : Optional[Any] = None
__A : Union[str, Any] = None
__A : Union[str, Any] = None
__A : Any = None
__A : Union[str, Any] = None
__A : List[str] = None
__A : Union[str, Any] = None
__A : List[Any] = None
__A : Optional[int] = None
__A : Union[str, Any] = None
import shutil
__A : str = None
__A : Dict = None
__A : List[str] = None
import subprocess
__A : Any = None # type: ignore
__A : Optional[Any] = None
import sys
__A : int = None
__A : Any = None
__A : Any = None
__A : Union[str, Any] = None
__A : Tuple = None
| 706 |
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : Union[str, Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : List[Any] = 0
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : Dict = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A : Optional[int] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__A : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : List[str] = 0
__A : Union[str, Any] = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : List[Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : Dict = 0
__A : str = 0
__A : Tuple = 0
__A : Optional[int] = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A : Any = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__A : Tuple = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : Dict = 0
__A : int = 0
return updated_arr
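# Worked example (added for illustration; uses the names from the demo
# below): both helpers use the no-padding output-size formula
# (in - size) // stride + 1, so a 4x4 input with size=2, stride=2 yields a
# 2x2 output:
#     maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#     # -> [[ 6.,  8.], [14., 16.]]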
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
UpperCAmelCase : int = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : List[str] = ''
for word_or_phrase in separated:
if not isinstance(a , a ):
raise Exception('join() accepts only strings to be joined' )
joined += word_or_phrase + separator
return joined.strip(a )
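# Intended behaviour (illustrative; assumes the two identically named
# parameters stand for the separator and the iterable of words, as the
# body suggests):
#     join("-", ["a", "b", "c"])  # -> "a-b-c"
# Note the trailing separator is removed with str.strip, which strips a set
# of characters rather than a suffix, so a multi-character separator that
# shares characters with the words may strip more than intended.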
if __name__ == "__main__":
from doctest import testmod
testmod()
| 707 |
from __future__ import annotations
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( a , a , a , a = 1_00 , ) -> float:
__A : Any = x_start
__A : List[str] = fnc(a )
__A : Optional[Any] = 0.0
for _ in range(a ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
__A : Any = (x_end - x_start) / steps + xa
__A : List[str] = fnc(a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__A : Any = xa
__A : Dict = fxa
return area
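# Sanity check (added for illustration; trapezoidal_area is the name used in
# the demo below): the loop implements the composite trapezoidal rule,
# summing (f(x_i) + f(x_{i+1})) / 2 * (x_{i+1} - x_i) over `steps` segments.
# For f(x) = x**2 on [0, 3] the exact integral is 9, so
#     trapezoidal_area(lambda x: x * x, 0.0, 3.0, steps=10_000)  # ~ 9.0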
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> int:
        return a**3 + a**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
UpperCAmelCase : Tuple = 10
while i <= 10_00_00:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 77 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _SCREAMING_SNAKE_CASE ( a ) -> Any:
__A : str = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(a , a )
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[Any]:
__A : Optional[Any] = emb.weight.shape
__A : Optional[Any] = nn.Linear(a , a , bias=a )
__A : int = emb.weight.data
return lin_layer
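# Note (illustrative): the helper above ties the language-model head to the
# token embeddings; it unpacks emb.weight.shape, builds a bias-free
# nn.Linear, and rebinds the layer's weight data to the embedding matrix,
# so head and embeddings end up sharing weights.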
def _SCREAMING_SNAKE_CASE ( a ) -> Any:
__A : Optional[Any] = torch.load(a , map_location='cpu' )
__A : Optional[int] = Namespace(**checkpoint['cfg']['model'] )
__A : int = checkpoint['model']
remove_ignore_keys_(a )
__A : List[str] = state_dict['decoder.embed_tokens.weight'].shape[0]
__A : Any = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
__A : Optional[Any] = XGLMConfig(
vocab_size=a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
__A : Optional[Any] = XGLMForCausalLM(a )
__A : List[Any] = model.load_state_dict(a , strict=a )
print(a )
__A : Union[str, Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCAmelCase : List[str] = parser.parse_args()
UpperCAmelCase : Tuple = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Optional[Any] = rabinMiller.generate_large_prime(a )
print('Generating prime q...' )
__A : Union[str, Any] = rabinMiller.generate_large_prime(a )
__A : Tuple = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__A : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(a , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__A : Any = cryptoMath.find_mod_inverse(a , (p - 1) * (q - 1) )
__A : Dict = (n, e)
__A : Dict = (n, d)
return (public_key, private_key)
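# Self-check sketch (added for illustration; generate_key is the maker
# above): RSA requires e*d == 1 (mod (p-1)*(q-1)), so encrypting then
# decrypting is the identity for any message m < n:
#     public, private = generate_key(64)  # toy key size, not for real use
#     n, e = public
#     _, d = private
#     assert pow(pow(42, e, n), d, n) == 42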
def _SCREAMING_SNAKE_CASE ( a , a ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A , __A : Optional[int] = generate_key(a )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 77 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : Union[str, Any] = ''''''
UpperCAmelCase : Optional[int] = ''''''
UpperCAmelCase : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A : List[Any] = get_dataset(a , a )
print('Processing...' )
__A : Optional[Any] = update_image_and_anno(a , a , a )
for index, image in enumerate(a ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__A : Optional[int] = random_chars(32 )
__A : Dict = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
__A : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(a )} with {file_name}""" )
__A : int = []
for anno in new_annos[index]:
__A : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a )
with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[list, list]:
__A : int = []
__A : List[Any] = []
for label_file in glob.glob(os.path.join(a , '*.txt' ) ):
__A : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(a ) as in_file:
__A : Tuple = in_file.readlines()
__A : Dict = os.path.join(a , F"""{label_name}.jpg""" )
__A : Dict = []
for obj_list in obj_lists:
__A : int = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( a , a , a = 1 ) -> tuple[list, list, list]:
__A : int = []
__A : Optional[Any] = []
__A : Dict = []
for idx in range(len(a ) ):
__A : Dict = []
__A : Optional[Any] = img_list[idx]
path_list.append(a )
__A : Union[str, Any] = anno_list[idx]
__A : Optional[Any] = cva.imread(a )
if flip_type == 1:
__A : Any = cva.flip(a , a )
for bbox in img_annos:
__A : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__A : Union[str, Any] = cva.flip(a , a )
for bbox in img_annos:
__A : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(a )
new_imgs_list.append(a )
return new_imgs_list, new_annos_lists, path_list
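# Note (illustrative): annotations are YOLO-style (class, x_center,
# y_center, width, height) with coordinates normalised to [0, 1], so a
# horizontal flip (flip_type == 1 above) only needs x_center -> 1 - x_center
# and a vertical flip (flip_type == 0) only needs y_center -> 1 - y_center;
# widths and heights are unchanged.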
def _SCREAMING_SNAKE_CASE ( a = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
__A : List[Any] = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 709 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = ProphetNetTokenizer
UpperCamelCase : Tuple = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = 'UNwant\u00E9d,running'
__A : List[str] = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase_ ( self ):
__A : Tuple = self.tokenizer_class(self.vocab_file )
__A : List[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self ):
__A : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCAmelCase_ ( self ):
__A : List[str] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : List[Any] = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__A : Optional[int] = {}
for i, token in enumerate(_A ):
__A : Tuple = i
__A : Tuple = WordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
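# Illustration of the assertions above: WordPiece greedily matches the
# longest vocabulary prefix and emits continuations with a '##' prefix, so
# with the toy vocab 'unwanted' -> ['un', '##want', '##ed'], while a word
# containing an unmatchable piece ('unwantedX') collapses to '[UNK]'.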
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__A : str = tokenizer(_A , padding=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
__A : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Any = tokenizer.encode('sequence builders' , add_special_tokens=_A )
__A : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_A )
__A : str = tokenizer.build_inputs_with_special_tokens(_A )
__A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 77 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=18 , _A=30 , _A=400 , _A=True , _A=32 , _A=True , ):
__A : Optional[int] = parent
__A : List[str] = batch_size
__A : int = num_channels
__A : List[str] = image_size
__A : Any = min_resolution
__A : Optional[Any] = max_resolution
__A : Optional[int] = do_resize
__A : str = size_divisor
__A : Dict = do_rescale
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
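# Note (illustrative, assuming the usual GLPN behaviour): size_divisor-based
# resizing rounds each spatial dimension down to a multiple of the divisor,
# which is what the shape assertions in the tests below rely on:
#     new_h = (height // size_divisor) * size_divisor
#     new_w = (width // size_divisor) * size_divisor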
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = GLPNImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : List[Any] = GLPNImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size_divisor' ) )
self.assertTrue(hasattr(_A , 'resample' ) )
self.assertTrue(hasattr(_A , 'do_rescale' ) )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 710 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = BertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__A : Any = getattr(_A , normalizer_state.pop('type' ) )
__A : Union[str, Any] = do_lower_case
__A : Optional[int] = strip_accents
__A : List[Any] = tokenize_chinese_chars
__A : int = normalizer_class(**_A )
__A : Union[str, Any] = do_lower_case
def UpperCAmelCase_ ( self , _A , _A=None ):
__A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = [self.sep_token_id]
__A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
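# Illustration of the mask built above: token_type_ids are 0 over
# "[CLS] A [SEP]" and 1 over "B [SEP]" for sequence pairs, e.g.
#     [CLS] a1 a2 [SEP] b1 b2 [SEP]
#       0    0  0    0   1  1    1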
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 77 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _A:
"""simple docstring"""
def __init__( self , _A , _A=2 , _A=True , _A=False , _A=10 , _A=3 , _A=32 * 4 , _A=32 * 6 , _A=4 , _A=32 , ):
__A : str = parent
__A : List[Any] = batch_size
__A : Optional[int] = is_training
__A : List[str] = use_auxiliary_loss
__A : List[str] = num_queries
__A : Tuple = num_channels
__A : Any = min_size
__A : Union[str, Any] = max_size
__A : str = num_labels
__A : str = mask_feature_size
def UpperCAmelCase_ ( self ):
__A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
__A : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
__A : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
).float()
__A : Tuple = (torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
__A : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.prepare_config_and_inputs()
__A : Dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self , _A , _A ):
__A : Any = output.encoder_hidden_states
__A : Union[str, Any] = output.pixel_decoder_hidden_states
__A : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self , _A , _A , _A , _A=False ):
with torch.no_grad():
__A : Optional[Any] = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
__A : int = model(pixel_values=_A , pixel_mask=_A )
__A : List[Any] = model(_A , output_hidden_states=_A )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A ):
__A : int = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(_A ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__A : List[Any] = model(pixel_values=_A , pixel_mask=_A )
__A : Dict = model(_A )
comm_check_on_output(_A )
__A : List[Any] = model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase : List[Any] = False
UpperCamelCase : int = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Any = False
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = MaskFormerModelTester(self )
__A : str = ConfigTester(self , config_class=_A , has_text_modality=_A )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def UpperCAmelCase_ ( self ):
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCAmelCase_ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Dict = model_class(_A )
__A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : List[Any] = [*signature.parameters.keys()]
__A : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
__A : int = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self ):
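# smoke test: a freshly initialized model must return a loss for random labels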
__A : Optional[int] = (self.model_tester.min_size,) * 2
__A : Union[str, Any] = {
'pixel_values': torch.randn((2, 3, *size) , device=_A ),
'mask_labels': torch.randn((2, 10, *size) , device=_A ),
'class_labels': torch.zeros(2 , 10 , device=_A ).long(),
}
__A : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
__A : Union[str, Any] = model(**_A )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A ).to(_A )
__A : Dict = model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__A : List[Any] = self.all_model_classes[1]
__A : Tuple = self.model_tester.prepare_config_and_inputs()
__A : Tuple = model_class(_A )
model.to(_A )
model.train()
__A : Any = model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def UpperCAmelCase_ ( self ):
# only MaskFormerForInstanceSegmentation has the loss
__A : str = self.all_model_classes[1]
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
__A : str = True
__A : int = True
__A : Optional[int] = model_class(_A )
model.to(_A )
model.train()
__A : str = model(_A , mask_labels=_A , class_labels=_A )
__A : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__A : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__A : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__A : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase : Optional[Any] = 1E-4
def _SCREAMING_SNAKE_CASE ( ) -> Any:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self ):
__A : int = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_A )
__A : List[Any] = self.default_image_processor
__A : List[str] = prepare_img()
__A : Any = image_processor(_A , return_tensors='pt' ).to(_A )
__A : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1088) )
with torch.no_grad():
__A : Dict = model(**_A )
__A : List[Any] = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
__A : Any = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
__A : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_A )
.eval()
)
__A : List[str] = self.default_image_processor
__A : int = prepare_img()
__A : str = image_processor(_A , return_tensors='pt' ).to(_A )
__A : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1088) )
with torch.no_grad():
__A : Optional[int] = model(**_A )
# masks_queries_logits
__A : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__A : int = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
__A : Dict = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
__A : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__A : Optional[int] = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def UpperCAmelCase_ ( self ):
__A : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_A )
.eval()
)
__A : Optional[Any] = self.default_image_processor
__A : Any = prepare_img()
__A : Union[str, Any] = image_processor(_A , return_tensors='pt' ).to(_A )
__A : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1088) )
with torch.no_grad():
__A : int = model(**_A )
# masks_queries_logits
__A : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__A : Any = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
__A : Optional[Any] = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
__A : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__A : List[str] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def UpperCAmelCase_ ( self ):
__A : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_A )
.eval()
)
__A : Any = self.default_image_processor
__A : Optional[int] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__A : Optional[Any] = inputs['pixel_values'].to(_A )
__A : Any = [el.to(_A ) for el in inputs['mask_labels']]
__A : Optional[Any] = [el.to(_A ) for el in inputs['class_labels']]
with torch.no_grad():
__A : Optional[Any] = model(**_A )
self.assertTrue(outputs.loss is not None )
| 711 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ):
debug_launcher(test_ops.main )
| 77 | 0 |
from collections import namedtuple
UpperCAmelCase : Any = namedtuple('''from_to''', '''from_ to''')
UpperCAmelCase : Any = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def _SCREAMING_SNAKE_CASE ( value , from_type , to_type ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
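# Illustrative check (not part of the original sample): converting 4 cubic metres
# to litres multiplies by from_ (1) and to (1000):
# _SCREAMING_SNAKE_CASE(4, 'cubicmeter', 'litre')  # -> 4 * 1 * 1000 = 4000.0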
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
__A : int = 16
__A : Optional[int] = len(_A )
__A : int = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
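# 49406 / 49407 are CLIP's <|startoftext|> / <|endoftext|> ids; trailing zeros pad to length 16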
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 77 | 0 |
import math
def _SCREAMING_SNAKE_CASE ( n = 1_00 ) -> int:
__A : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
__A : Optional[int] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
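# e.g. for n = 10: (1 + ... + 10)**2 - (1**2 + ... + 10**2) = 3025 - 385 = 2640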
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
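# fairseq -> HF parameter-name map; the "*" wildcard is replaced by the layer index at load time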
UpperCAmelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Tuple:
for attribute in key.split('.' ):
__A : Dict = getattr(a , a )
if weight_type is not None:
__A : Any = getattr(a , a ).shape
else:
__A : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__A : Union[str, Any] = value
elif weight_type == "weight_g":
__A : Dict = value
elif weight_type == "weight_v":
__A : Optional[int] = value
elif weight_type == "bias":
__A : int = value
elif weight_type == "running_mean":
__A : Union[str, Any] = value
elif weight_type == "running_var":
__A : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
__A : Any = value
elif weight_type == "inv_freq":
__A : Optional[Any] = value
else:
__A : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Union[str, Any]:
__A : Any = []
__A : Optional[int] = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__A : int = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__A : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : Optional[Any] = True
if "*" in mapped_key:
__A : str = name.split(a )[0].split('.' )[-2]
__A : int = mapped_key.replace('*' , a )
if "pos_bias_u" in name:
__A : Optional[int] = None
elif "pos_bias_v" in name:
__A : Dict = None
elif "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Dict = 'weight_v'
elif "bias" in name:
__A : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : int = 'weight'
elif "running_mean" in name:
__A : str = 'running_mean'
elif "inv_freq" in name:
__A : List[Any] = 'inv_freq'
elif "running_var" in name:
__A : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[Any] = 'num_batches_tracked'
else:
__A : List[str] = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Any:
__A : str = full_name.split('conv_layers.' )[-1]
__A : str = name.split('.' )
__A : Dict = int(items[0] )
__A : Any = int(items[1] )
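# type_id 0 -> conv weight/bias; type_id 2 -> layer norm (with group norm it applies to layer 0 only)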
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__A : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__A : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=None , a=True ) -> Any:
if config_path is not None:
__A : Tuple = WavaVecaConformerConfig.from_pretrained(a , hidden_act='swish' )
else:
__A : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__A : Dict = 'rotary'
if is_finetuned:
if dict_path:
__A : Dict = Dictionary.load(a )
# important: change the bos & pad token ids since the CTC symbol is <pad> and
# not <s> as in fairseq
__A : int = target_dict.pad_index
__A : List[Any] = target_dict.bos_index
__A : Any = target_dict.eos_index
__A : Dict = len(target_dict.symbols )
__A : Optional[Any] = os.path.join(a , 'vocab.json' )
if not os.path.isdir(a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a ) )
return
os.makedirs(a , exist_ok=a )
__A : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__A : int = 0
__A : Optional[Any] = 1
with open(a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(a , a )
__A : Optional[Any] = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a , )
__A : Tuple = True if config.feat_extract_norm == 'layer' else False
__A : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__A : Optional[int] = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__A : List[Any] = WavaVecaConformerForCTC(a )
else:
__A : List[Any] = WavaVecaConformerForPreTraining(a )
if is_finetuned:
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__A : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__A : str = fairseq.tasks.setup_task(a )
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__A : Tuple = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 77 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Tuple = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase : Any = {
'''camembert-base''': 5_12,
}
UpperCAmelCase : Optional[Any] = '''▁'''
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Any = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Any = CamembertTokenizer
def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=["<s>NOTUSED", "</s>NOTUSED"] , **_A , ):
# Mask token behave like a normal word, i.e. include the space before it
__A : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , additional_special_tokens=_A , **_A , )
__A : Dict = vocab_file
__A : Any = False if not self.vocab_file else True
def UpperCAmelCase_ ( self , _A , _A = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
__A : Dict = [self.sep_token_id]
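# pair format is RoBERTa-style: <s> A </s></s> B </s>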
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , _A , _A = None ):
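# CamemBERT, like RoBERTa, does not use token type ids, so the mask is all zeros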
__A : Optional[int] = [self.sep_token_id]
__A : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self , _A , _A = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : List[str] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 714 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
| 77 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
UpperCAmelCase : str = '''sshleifer/bart-tiny-random'''
UpperCAmelCase : List[str] = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return AutoConfig.from_pretrained(_A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=_A )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=_A )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase_ ( self ):
__A : Dict = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase_ ( self ):
with self.assertRaises(_A ):
create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=_A , d=_A )
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77 | 0 |
import torch
from diffusers import DiffusionPipeline
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A ):
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
def __call__( self ):
__A : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__A : List[str] = 1
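# one denoising step at timestep 1: the UNet predicts, then the scheduler steps back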
__A : int = self.unet(_A , _A ).sample
__A : Union[str, Any] = self.scheduler.step(_A , _A , _A ).prev_sample
__A : List[str] = scheduler_output - scheduler_output + torch.ones_like(_A )
return result
| 716 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
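# compare the bottom-right 3x3 corner of the first frame's last channel to a reference slice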
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 77 | 0 |
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase : int = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _SCREAMING_SNAKE_CASE ( grid , row , column , n ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
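# the 3x3 box containing (row, column) must not already contain n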
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _SCREAMING_SNAKE_CASE ( grid ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _SCREAMING_SNAKE_CASE ( a ) -> Matrix | None:
if location := find_empty_location(a ):
__A : str = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(a , a , a , a ):
__A : Dict = digit
if sudoku(a ) is not None:
return grid
__A : List[Any] = 0
return None
def _SCREAMING_SNAKE_CASE ( grid ) -> None:
for row in grid:
for cell in row:
print(cell , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
UpperCAmelCase : Tuple = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 717 |
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE ( a , b ) -> list:
if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__A : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def _SCREAMING_SNAKE_CASE ( matrix_a , matrix_b ) -> str:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def _SCREAMING_SNAKE_CASE ( matrix_a , matrix_b ) -> Optional[int]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[list, list, list, list]:
if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__A : str = len(a )
__A : List[Any] = matrix_length // 2
__A : List[str] = [[a[i][j] for j in range(a , a )] for i in range(a )]
__A : Dict = [
[a[i][j] for j in range(a , a )] for i in range(a , a )
]
__A : int = [[a[i][j] for j in range(a )] for i in range(a )]
__A : Any = [[a[i][j] for j in range(a )] for i in range(a , a )]
return top_left, top_right, bot_left, bot_right
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[int, int]:
return len(a ), len(a[0] )
def _SCREAMING_SNAKE_CASE ( matrix ) -> None:
print('\n'.join(str(line ) for line in matrix ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if matrix_dimensions(a ) == (2, 2):
return default_matrix_multiplication(a , a )
__A , __A , __A , __A : str = split_matrix(a )
__A , __A , __A , __A : List[Any] = split_matrix(a )
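# Strassen's seven recursive half-size products over the quadrants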
__A : Any = actual_strassen(a , matrix_subtraction(a , a ) )
__A : Tuple = actual_strassen(matrix_addition(a , a ) , a )
__A : List[str] = actual_strassen(matrix_addition(a , a ) , a )
__A : Optional[int] = actual_strassen(a , matrix_subtraction(a , a ) )
__A : Any = actual_strassen(matrix_addition(a , a ) , matrix_addition(a , a ) )
__A : Any = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) )
__A : List[Any] = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) )
__A : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a , a ) , a ) , a )
__A : Union[str, Any] = matrix_addition(a , a )
__A : str = matrix_addition(a , a )
__A : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(a , a ) , a ) , a )
# construct the new matrix from our 4 quadrants
__A : List[Any] = []
for i in range(len(a ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if matrix_dimensions(a )[1] != matrix_dimensions(a )[0]:
__A : Dict = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a )
__A : int = matrix_dimensions(a )
__A : Any = matrix_dimensions(a )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__A : List[Any] = max(*a , *a )
__A : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(a ) ) ) )
__A : Union[str, Any] = matrixa
__A : Optional[int] = matrixa
# Adding zeros to the matrices so that their dimensions match and are a
# power of 2
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__A : str = actual_strassen(a , a )
# Removing the additional zeros
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase : List[str] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718 |
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = []
__A : Tuple = []
__A : Union[str, Any] = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
__A : List[str] = len(a ) if (len(a ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(a ) , 'Postfix'.center(a ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(a ) # if x is a letter or digit, add it to Postfix
elif x == "(":
stack.append(a ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(a ) == 0:
stack.append(a ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(a ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(a ) # push x to stack
print(
x.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
while len(a ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
return "".join(a ) # return Postfix as str
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
__A : List[Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(a ) ):
if infix[i] == "(":
__A : List[str] = ')' # change "(" to ")"
elif infix[i] == ")":
__A : Any = '(' # change ")" to "("
return (infix_2_postfix(''.join(a ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
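# e.g. infix 'a+b*c' reverses to 'c*b+a', whose postfix 'cb*a+' reversed gives the prefix '+a*bc'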
if __name__ == "__main__":
UpperCAmelCase : List[str] = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase : Union[str, Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 77 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ):
__A : int = parent
__A : Tuple = batch_size
__A : Optional[int] = image_size
__A : Optional[int] = num_channels
__A : int = embeddings_size
__A : List[str] = hidden_sizes
__A : Union[str, Any] = depths
__A : Optional[Any] = is_training
__A : str = use_labels
__A : Optional[Any] = hidden_act
__A : str = num_labels
__A : List[str] = scope
__A : int = len(_A )
def UpperCAmelCase_ ( self ):
__A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : List[str] = self.get_config()
return config, pixel_values
def UpperCAmelCase_ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self , _A , _A ):
__A : int = FlaxRegNetModel(config=_A )
__A : Tuple = model(_A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _A , _A ):
__A : Dict = self.num_labels
__A : List[str] = FlaxRegNetForImageClassification(config=_A )
__A : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ):
__A : Dict = self.prepare_config_and_inputs()
__A : Dict = config_and_inputs
__A : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCamelCase : List[str] = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : List[str] = False
def UpperCAmelCase_ ( self ):
__A : str = FlaxRegNetModelTester(self )
__A : Any = ConfigTester(self , config_class=_A , has_text_modality=_A )
def UpperCAmelCase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
return
def UpperCAmelCase_ ( self ):
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(_A )
__A : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Dict = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self ):
def check_hidden_states_output(_A , _A , _A ):
__A : int = model_class(_A )
__A : str = model(**self._prepare_for_class(_A , _A ) )
__A : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A : int = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : int = True
check_hidden_states_output(_A , _A , _A )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A : int = self._prepare_for_class(_A , _A )
__A : Tuple = model_class(_A )
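# jit-compile the forward pass and verify it matches eager execution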
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=_A , **_A )
with self.subTest('JIT Enabled' ):
__A : str = model_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__A : Optional[int] = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
__A : str = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__A : List[str] = self.default_image_processor
__A : str = prepare_img()
__A : Any = image_processor(images=_A , return_tensors='np' )
__A : Dict = model(**_A )
# verify the logits
__A : Tuple = (1, 1000)
self.assertEqual(outputs.logits.shape , _A )
__A : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
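# Minimal, self-contained sketch (illustrative names, not part of the original
# test file) of the pattern the JIT test above exercises: a jitted and an eager
# call of the same pure function must agree in output shape.
import jax
import jax.numpy as jnp
def _demo_forward(pixel_values):
    # stand-in for a model forward pass
    return jnp.tanh(pixel_values).sum(axis=-1)
_demo_jitted = jax.jit(_demo_forward)
_demo_x = jnp.ones((1, 3, 4))
assert _demo_jitted(_demo_x).shape == _demo_forward(_demo_x).shape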
| 719 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''mask2former'''
UpperCamelCase : Any = ['''swin''']
UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , _A = None , _A = 256 , _A = 256 , _A = 256 , _A = 1024 , _A = "relu" , _A = 6 , _A = 10 , _A = 8 , _A = 0.0 , _A = 2048 , _A = False , _A = False , _A = 4 , _A = 255 , _A = 100 , _A = 0.1 , _A = 2.0 , _A = 5.0 , _A = 5.0 , _A = 12544 , _A = 3.0 , _A = 0.7_5 , _A = 0.0_2 , _A = 1.0 , _A = True , _A = [4, 8, 16, 32] , _A = None , **_A , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__A : Optional[int] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Dict = backbone_config.pop('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : Optional[int] = backbone_config
__A : Optional[Any] = feature_size
__A : Any = mask_feature_size
__A : Optional[Any] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : Optional[Any] = activation_function
__A : List[Any] = encoder_layers
__A : Union[str, Any] = decoder_layers
__A : Dict = num_attention_heads
__A : Tuple = dropout
__A : Dict = dim_feedforward
__A : Tuple = pre_norm
__A : Dict = enforce_input_projection
__A : Optional[int] = common_stride
__A : Optional[Any] = ignore_value
__A : str = num_queries
__A : List[Any] = no_object_weight
__A : List[str] = class_weight
__A : List[Any] = mask_weight
__A : List[Any] = dice_weight
__A : Tuple = train_num_points
__A : Optional[Any] = oversample_ratio
__A : Union[str, Any] = importance_sample_ratio
__A : Union[str, Any] = init_std
__A : int = init_xavier_std
__A : Union[str, Any] = use_auxiliary_loss
__A : Union[str, Any] = feature_strides
__A : List[Any] = output_auxiliary_logits
__A : Optional[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
return cls(
backbone_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = copy.deepcopy(self.__dict__ )
__A : List[Any] = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
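# Hedged sketch of the round trip the to_dict override above supports: the
# nested backbone config is inlined as a plain dict so the whole config
# survives JSON serialization. Guarded so the module stays import-safe; uses
# the public transformers API and illustrative variable names.
if __name__ == "__main__":
    from transformers import Mask2FormerConfig
    _cfg = Mask2FormerConfig()  # defaults to the Swin backbone configured above
    _d = _cfg.to_dict()
    assert isinstance(_d['backbone_config'], dict)
    assert Mask2FormerConfig.from_dict(_d).backbone_config.model_type == 'swin'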
| 77 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _A( yaml.SafeLoader ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _A ):
__A : Optional[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
__A : List[str] = [tuple(_A ) if isinstance(_A , _A ) else key for key in keys]
__A : Tuple = Counter(_A )
__A : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCAmelCase_ ( self , _A , _A=False ):
__A : int = super().construct_mapping(_A , deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple[Optional[str], str]:
__A : List[Any] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
__A : List[str] = full_content[1:].index('---' ) + 1
__A : List[str] = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(a )
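# Self-contained illustration (hypothetical demo names) of the front-matter
# convention parsed above: a README whose first line is "---" carries a YAML
# block terminated by the next "---"; everything after it is the body.
def _demo_split_yaml(readme_content):
    lines = readme_content.splitlines()
    if lines and lines[0] == "---" and "---" in lines[1:]:
        sep_idx = lines[1:].index("---") + 1
        return "\n".join(lines[1:sep_idx]), "\n".join(lines[sep_idx + 1 :])
    return None, "\n".join(lines)
assert _demo_split_yaml("---\npretty_name: Demo\n---\n# Title") == ("pretty_name: Demo", "# Title")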
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase_ ( cls , _A ):
with open(_A , encoding='utf-8' ) as readme_file:
__A : int = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def UpperCAmelCase_ ( self , _A ):
if path.exists():
with open(_A , encoding='utf-8' ) as readme_file:
__A : Tuple = readme_file.read()
else:
__A : Dict = None
__A : str = self._to_readme(_A )
with open(_A , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(_A )
def UpperCAmelCase_ ( self , _A = None ):
if readme_content is not None:
__A : str = _split_yaml_from_readme(_A )
__A : Dict = '---\n' + self.to_yaml_string() + '---\n' + content
else:
__A : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def UpperCAmelCase_ ( cls , _A ):
__A : List[Any] = yaml.load(_A , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
__A : List[str] = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def UpperCAmelCase_ ( self ):
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_A , allow_unicode=_A , encoding='utf-8' , ).decode('utf-8' )
UpperCAmelCase : Tuple = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase : int = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
UpperCAmelCase : Dict = ap.parse_args()
UpperCAmelCase : Tuple = Path(args.readme_filepath)
UpperCAmelCase : Tuple = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 720 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''conditional_detr'''
UpperCamelCase : int = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _A=True , _A=None , _A=3 , _A=300 , _A=6 , _A=2048 , _A=8 , _A=6 , _A=2048 , _A=8 , _A=0.0 , _A=0.0 , _A=True , _A="relu" , _A=256 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1.0 , _A=False , _A="sine" , _A="resnet50" , _A=True , _A=False , _A=2 , _A=5 , _A=2 , _A=1 , _A=1 , _A=2 , _A=5 , _A=2 , _A=0.2_5 , **_A , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__A : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__A : Tuple = backbone_config.get('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[Any] = config_class.from_dict(_A )
__A : Tuple = use_timm_backbone
__A : List[str] = backbone_config
__A : Dict = num_channels
__A : int = num_queries
__A : int = d_model
__A : str = encoder_ffn_dim
__A : List[str] = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Union[str, Any] = decoder_ffn_dim
__A : List[Any] = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : Any = dropout
__A : Any = attention_dropout
__A : int = activation_dropout
__A : Optional[int] = activation_function
__A : Union[str, Any] = init_std
__A : Union[str, Any] = init_xavier_std
__A : Optional[Any] = encoder_layerdrop
__A : int = decoder_layerdrop
__A : List[str] = encoder_layers
__A : str = auxiliary_loss
__A : Union[str, Any] = position_embedding_type
__A : Optional[int] = backbone
__A : List[str] = use_pretrained_backbone
__A : List[Any] = dilation
# Hungarian matcher
__A : List[str] = class_cost
__A : Optional[int] = bbox_cost
__A : Dict = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Union[str, Any] = dice_loss_coefficient
__A : List[Any] = cls_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
__A : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Dict = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-5
@property
def UpperCAmelCase_ ( self ):
return 12
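# Hedged sketch of the attribute aliasing the two properties above rely on:
# PretrainedConfig's attribute_map lets hidden_size read d_model and
# num_attention_heads read encoder_attention_heads. Guarded so the module
# stays import-safe; names are illustrative.
if __name__ == "__main__":
    from transformers import ConditionalDetrConfig
    _cfg = ConditionalDetrConfig()
    assert _cfg.hidden_size == _cfg.d_model
    assert _cfg.num_attention_heads == _cfg.encoder_attention_heads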
| 77 | 0 |
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE ( a = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(a ):
__A : Optional[Any] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(a )[1] in (".py", ".ipynb"):
yield os.path.join(a , a ).lstrip('./' )
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
return F"""{i * " "}*""" if i else "\n##"
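# Self-contained check (hypothetical name) of the prefix rule implemented just
# above: depth 0 opens a new "##" section, deeper entries become indented
# bullets.
def _demo_md_prefix(i):
    return F"""{i * " "}*""" if i else "\n##"
assert _demo_md_prefix(0) == "\n##"
assert _demo_md_prefix(2) == "  *"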
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : int = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(a ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(a )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def _SCREAMING_SNAKE_CASE ( a = "." ) -> None:
__A : List[str] = ''
for filepath in sorted(good_file_paths(a ) ):
__A : Any = os.path.split(a )
if filepath != old_path:
__A : Union[str, Any] = print_path(a , a )
__A : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
__A : Optional[int] = F"""{filepath}/{filename}""".replace(' ' , '%20' )
__A : str = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F"""{md_prefix(a )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
| 721 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _A( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
__A : List[str] = nn.Linear(3 , 4 )
__A : Optional[Any] = nn.BatchNormad(4 )
__A : List[Any] = nn.Linear(4 , 5 )
def UpperCAmelCase_ ( self , _A ):
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , model.state_dict() )
__A : str = os.path.join(_A , 'index.json' )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__A : Optional[int] = os.path.join(_A , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase_ ( self ):
__A : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__A : Tuple = torch.randn(2 , 3 , dtype=_A )
with TemporaryDirectory() as tmp_dir:
__A : int = offload_weight(_A , 'weight' , _A , {} )
__A : Union[str, Any] = os.path.join(_A , 'weight.dat' )
self.assertTrue(os.path.isfile(_A ) )
self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A ).split('.' )[1]}} )
__A : List[str] = load_offloaded_weight(_A , index['weight'] )
self.assertTrue(torch.equal(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : int = ModelForTest()
__A : Union[str, Any] = model.state_dict()
__A : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
__A : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
__A : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
__A : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : Optional[int] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
# Duplicates are removed
__A : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
def UpperCAmelCase_ ( self ):
__A : Dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
__A : str = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2} )
__A : Optional[Any] = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
__A : Any = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2} )
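# Minimal standalone sketch of the round trip the tests above cover:
# offload_weight writes a memory-mapped .dat file plus an index entry recording
# shape and dtype, and load_offloaded_weight restores the tensor bit-for-bit.
# Uses only the imports at the top of this file; names are illustrative.
def _demo_offload_round_trip():
    weight = torch.randn(2, 3)
    with TemporaryDirectory() as tmp_dir:
        index = offload_weight(weight, 'weight', tmp_dir, {})
        restored = load_offloaded_weight(os.path.join(tmp_dir, 'weight.dat'), index['weight'])
        assert torch.equal(weight, restored)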
| 77 | 0 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
UpperCAmelCase : int = True
except ImportError:
UpperCAmelCase : Tuple = False
try:
from torch.hub import _get_torch_home
UpperCAmelCase : int = _get_torch_home()
except ImportError:
UpperCAmelCase : Tuple = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
UpperCAmelCase : Tuple = os.path.join(torch_cache_home, '''transformers''')
UpperCAmelCase : Dict = '''https://cdn.huggingface.co'''
UpperCAmelCase : Any = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
UpperCAmelCase : List[Any] = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
UpperCAmelCase : List[str] = os.path.join(PATH, '''config.yaml''')
UpperCAmelCase : Optional[Any] = os.path.join(PATH, '''attributes.txt''')
UpperCAmelCase : Union[str, Any] = os.path.join(PATH, '''objects.txt''')
UpperCAmelCase : int = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
UpperCAmelCase : int = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
UpperCAmelCase : Union[str, Any] = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
UpperCAmelCase : List[str] = '''pytorch_model.bin'''
UpperCAmelCase : List[Any] = '''config.yaml'''
def _SCREAMING_SNAKE_CASE ( a=OBJECTS , a=ATTRIBUTES ) -> List[str]:
__A : Optional[int] = []
with open(a ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
__A : Union[str, Any] = []
with open(a ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
__A : List[str] = OrderedDict()
with open(a , 'rb' ) as f:
__A : Dict = pkl.load(a )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
__A : Tuple = ckp.pop(a )
if isinstance(a , np.ndarray ):
__A : Dict = torch.tensor(a )
else:
            assert isinstance(a , torch.Tensor ), type(a )
__A : Dict = v
return r
class _A:
"""simple docstring"""
UpperCamelCase : Dict = {}
def __init__( self , _A , _A = "root" , _A=0 ):
__A : Union[str, Any] = name
__A : int = level
__A : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__A : Union[str, Any] = copy.deepcopy(_A )
__A : int = copy.deepcopy(_A )
if isinstance(_A , _A ):
__A : Optional[int] = Config(_A , name=_A , level=level + 1 )
__A : List[str] = v
setattr(self , _A , _A )
__A : Optional[Any] = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _A , _A ):
__A : List[Any] = val
__A : Any = val
__A : Tuple = key.split('.' )
__A : List[Any] = len(_A ) - 1
__A : Dict = self._pointer
if len(_A ) > 1:
for i, l in enumerate(_A ):
if hasattr(self , _A ) and isinstance(getattr(self , _A ) , _A ):
setattr(getattr(self , _A ) , '.'.join(levels[i:] ) , _A )
if l == last_level:
__A : Tuple = val
else:
__A : List[Any] = pointer[l]
def UpperCAmelCase_ ( self ):
return self._pointer
def UpperCAmelCase_ ( self , _A , _A ):
with open(F"""{file_name}""" , 'w' ) as stream:
dump(_A , _A )
def UpperCAmelCase_ ( self , _A , _A ):
with open(F"""{file_name}""" , 'w' ) as stream:
json.dump(_A , _A )
@staticmethod
def UpperCAmelCase_ ( _A ):
with open(_A ) as stream:
__A : str = load(_A , Loader=_A )
return data
def __str__( self ):
__A : int = ' '
if self._name != "root":
__A : Any = F"""{t * (self._level-1)}{self._name}:\n"""
else:
__A : Any = ''
__A : List[str] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_A , _A ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(_A ).__name__})\n"""
__A : List[str] = level
return r[:-1]
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
__A : Union[str, Any] = cls.get_config_dict(_A , **_A )
return cls(_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
__A : Dict = kwargs.pop('cache_dir' , _A )
__A : Tuple = kwargs.pop('force_download' , _A )
__A : str = kwargs.pop('resume_download' , _A )
__A : List[Any] = kwargs.pop('proxies' , _A )
__A : Dict = kwargs.pop('local_files_only' , _A )
if os.path.isdir(_A ):
__A : int = os.path.join(_A , _A )
elif os.path.isfile(_A ) or is_remote_url(_A ):
__A : int = pretrained_model_name_or_path
else:
__A : List[str] = hf_bucket_url(_A , filename=_A , use_cdn=_A )
try:
# Load from URL or cache if already cached
__A : Union[str, Any] = cached_path(
_A , cache_dir=_A , force_download=_A , proxies=_A , resume_download=_A , local_files_only=_A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__A : List[str] = Config.load_yaml(_A )
except EnvironmentError:
__A : int = 'Can\'t load config for'
raise EnvironmentError(_A )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_A ), kwargs
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A : List[Any] = torch.load('dump.pt' , map_location=in_tensor.device )
__A : Any = in_tensor.numpy()
__A : Optional[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(a , a , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(a , a , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : List[str] = urlparse(a )
return parsed.scheme in ("http", "https")
def _SCREAMING_SNAKE_CASE ( a , a , a=True ) -> str:
__A : Dict = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__A : List[Any] = '/' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=0 , a=None , ) -> str:
__A : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(a , a ):
ua += "; " + "; ".join('{}/{}'.format(a , a ) for k, v in user_agent.items() )
elif isinstance(a , a ):
ua += "; " + user_agent
__A : Tuple = {'user-agent': ua}
if resume_size > 0:
__A : str = 'bytes=%d-' % (resume_size,)
__A : List[Any] = requests.get(a , stream=a , proxies=a , headers=a )
if response.status_code == 4_16: # Range not satisfiable
return
__A : List[str] = response.headers.get('Content-Length' )
__A : Optional[int] = resume_size + int(a ) if content_length is not None else None
__A : int = tqdm(
unit='B' , unit_scale=a , total=a , initial=a , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(a ) )
temp_file.write(a )
progress.close()
def _SCREAMING_SNAKE_CASE ( a , a=None , a=False , a=None , a=10 , a=False , a=None , a=False , ) -> Optional[Any]:
if cache_dir is None:
__A : Tuple = TRANSFORMERS_CACHE
if isinstance(a , a ):
__A : Dict = str(a )
os.makedirs(a , exist_ok=a )
__A : int = None
if not local_files_only:
try:
__A : Dict = requests.head(a , allow_redirects=a , proxies=a , timeout=a )
if response.status_code == 2_00:
__A : List[Any] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__A : Any = url_to_filename(a , a )
# get cache path to put the file
__A : List[str] = os.path.join(a , a )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(a ):
return cache_path
else:
__A : str = [
file
for file in fnmatch.filter(os.listdir(a ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(a ) > 0:
return os.path.join(a , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(a ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__A : List[Any] = cache_path + '.lock'
with FileLock(a ):
# If the download just completed while the lock was activated.
if os.path.exists(a ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__A : Union[str, Any] = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(a , 'a+b' ) as f:
yield f
__A : str = _resumable_file_manager
if os.path.exists(a ):
__A : List[str] = os.stat(a ).st_size
else:
__A : str = 0
else:
__A : List[Any] = partial(tempfile.NamedTemporaryFile , dir=a , delete=a )
__A : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , a , temp_file.name , )
http_get(
a , a , proxies=a , resume_size=a , user_agent=a , )
os.replace(temp_file.name , a )
__A : Dict = {'url': url, 'etag': etag}
__A : str = cache_path + '.json'
with open(a , 'w' ) as meta_file:
json.dump(a , a )
return cache_path
def _SCREAMING_SNAKE_CASE ( a , a=None ) -> Optional[Any]:
__A : str = url.encode('utf-8' )
__A : Union[str, Any] = shaaaa(a )
__A : Optional[int] = url_hash.hexdigest()
if etag:
__A : Tuple = etag.encode('utf-8' )
__A : List[Any] = shaaaa(a )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( a , a=None , a=False , a=None , a=False , a=None , a=False , a=False , a=False , ) -> Optional[Any]:
if cache_dir is None:
__A : str = TRANSFORMERS_CACHE
if isinstance(a , a ):
__A : List[str] = str(a )
if isinstance(a , a ):
__A : List[Any] = str(a )
if is_remote_url(a ):
# URL, so get it from the cache (downloading if necessary)
__A : Optional[Any] = get_from_cache(
a , cache_dir=a , force_download=a , proxies=a , resume_download=a , user_agent=a , local_files_only=a , )
elif os.path.exists(a ):
# File, and it exists.
__A : List[str] = url_or_filename
elif urlparse(a ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(a ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(a ) )
if extract_compressed_file:
if not is_zipfile(a ) and not tarfile.is_tarfile(a ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__A : Optional[int] = os.path.split(a )
__A : Union[str, Any] = output_file.replace('.' , '-' ) + '-extracted'
__A : Dict = os.path.join(a , a )
if os.path.isdir(a ) and os.listdir(a ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__A : List[Any] = output_path + '.lock'
with FileLock(a ):
shutil.rmtree(a , ignore_errors=a )
os.makedirs(a )
if is_zipfile(a ):
with ZipFile(a , 'r' ) as zip_file:
zip_file.extractall(a )
zip_file.close()
elif tarfile.is_tarfile(a ):
__A : int = tarfile.open(a )
tar_file.extractall(a )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(a ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( a , a="," ) -> List[str]:
assert isinstance(a , a )
if os.path.isfile(a ):
with open(a ) as f:
__A : Optional[int] = eval(f.read() )
else:
__A : List[Any] = requests.get(a )
try:
            __A : Tuple = req.json()
except Exception:
__A : str = req.content.decode()
assert data is not None, "could not connect"
try:
__A : int = eval(a )
except Exception:
__A : int = data.split('\n' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = requests.get(a )
__A : Any = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( a ) -> Union[str, Any]:
__A : Dict = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(a )
with open(a , 'rb' ) as stream:
__A : Any = pkl.load(a )
__A : Dict = weights.pop('model' )
__A : Any = {}
for k, v in model.items():
__A : int = torch.from_numpy(a )
if "running_var" in k:
__A : Union[str, Any] = torch.tensor([0] )
__A : Optional[Any] = k.replace('running_var' , 'num_batches_tracked' )
__A : int = zero
return new
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
print(F"""{os.path.abspath(os.path.join(a , os.pardir ) )}/demo.ipynb""" )
def _SCREAMING_SNAKE_CASE ( a , a="RGB" ) -> Tuple:
assert isinstance(a , a )
if os.path.isfile(a ):
__A : Any = cva.imread(a )
else:
__A : List[str] = get_image_from_url(a )
assert img is not None, F"""could not connect to: {im}"""
__A : Any = cva.cvtColor(a , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__A : Union[str, Any] = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( a , a=1 ) -> Dict:
return (images[i : i + batch] for i in range(0 , len(a ) , a ))
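# The batching generator above was meant to yield successive slices of at most
# `batch` items; a self-contained, runnable version of the same idea with
# illustrative names:
def _demo_chunks(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
assert [c for c in _demo_chunks([1, 2, 3, 4, 5], 2)] == [[1, 2], [3, 4], [5]]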
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
__A : Any = data
def __iter__( self ):
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE ( a=True ) -> Any:
__A : List[Any] = Accelerator(even_batches=a )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a , a = False ) -> str:
if iterable:
__A : int = DummyIterableDataset(torch.as_tensor(range(a ) ) )
else:
__A : Optional[Any] = TensorDataset(torch.as_tensor(range(a ) ) )
__A : Optional[Any] = DataLoader(a , batch_size=a )
__A : Optional[int] = accelerator.prepare(a )
return dl
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a , ) -> Union[str, Any]:
__A : Optional[int] = create_dataloader(accelerator=a , dataset_size=a , batch_size=a )
__A : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : str = create_accelerator(even_batches=a )
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : Optional[Any] = create_accelerator(even_batches=a )
__A : str = torch.nn.Linear(1 , 1 )
__A : Optional[int] = accelerator.prepare(a )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a ):
__A : Dict = ddp_model(batch[0].float() )
__A : List[str] = output.sum()
loss.backward()
batch_idxs.append(a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : int = True
__A : Union[str, Any] = False
__A : Optional[int] = create_accelerator(even_batches=a )
__A : int = torch.nn.Linear(1 , 1 )
__A : List[Any] = accelerator.prepare(a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : List[str] = train_dl.batch_sampler.even_batches
__A : Dict = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Any = True
__A : List[Any] = False
__A : Tuple = create_accelerator(even_batches=a )
__A : List[str] = torch.nn.Linear(1 , 1 )
__A : Optional[Any] = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : Tuple = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Any = create_accelerator()
__A : Union[str, Any] = torch.nn.Linear(1 , 1 )
__A : str = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
__A : int = accelerator.state.distributed_type
__A : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a )
__A : str = original_state
if __name__ == "__main__":
main()
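# Numeric illustration (no GPUs required, figures mirror the assertions above):
# with 2 processes, batch_size=2 and 7 samples, even_batches=True reuses early
# samples so both processes see batches of [2, 2] (8 samples total), while
# disabling it leaves process 1 a final batch of 1 (7 samples total).
_demo_even = ([2, 2], [2, 2])
_demo_uneven = ([2, 2], [2, 1])
assert sum(map(sum, _demo_even)) == 8 and sum(map(sum, _demo_uneven)) == 7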
| 77 | 0 |
from __future__ import annotations
from random import random
class _A:
"""simple docstring"""
def __init__( self , _A = None ):
__A : List[Any] = value
__A : Any = random()
__A : Node | None = None
__A : Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ):
__A : Union[str, Any] = str(self.value ) + ' '
__A : Tuple = str(self.left or '' )
__A : List[Any] = str(self.right or '' )
return value + left + right
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
            __A , __A : Optional[Any] = split(root.left , a )
return left, root
else:
            __A , __A : List[Any] = split(root.right , a )
return root, right
def _SCREAMING_SNAKE_CASE ( a , a ) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
__A : List[Any] = merge(left.right , a )
return left
else:
__A : Any = merge(a , right.left )
return right
def _SCREAMING_SNAKE_CASE ( a , a ) -> Node | None:
__A : Tuple = Node(a )
    __A , __A : Optional[Any] = split(a , a )
return merge(merge(a , a ) , a )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Node | None:
    __A , __A : Union[str, Any] = split(a , value - 1 )
    __A , __A : Dict = split(a , a )
return merge(a , a )
def _SCREAMING_SNAKE_CASE ( a ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Node | None:
for arg in args.split():
if arg[0] == "+":
__A : Tuple = insert(a , int(arg[1:] ) )
elif arg[0] == "-":
__A : Union[str, Any] = erase(a , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A : Union[str, Any] = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
__A : Tuple = input()
while args != "q":
__A : str = interact_treap(a , a )
print(a )
__A : Dict = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
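# Self-contained checker (hypothetical helper, duck-typed on value/prior/left/
# right) for the invariant the structure above maintains: BST order on `value`
# plus a min-heap on the random `prior` along every root-to-leaf path.
def _demo_is_treap(node, lo=float("-inf"), hi=float("inf")):
    if node is None:
        return True
    if not (lo < node.value < hi):
        return False
    for child in (node.left, node.right):
        if child is not None and child.prior < node.prior:
            return False
    return _demo_is_treap(node.left, lo, node.value) and _demo_is_treap(node.right, node.value, hi)
class _DemoNode:
    def __init__(self, value, prior, left=None, right=None):
        self.value, self.prior, self.left, self.right = value, prior, left, right
assert _demo_is_treap(_DemoNode(2, 0.1, _DemoNode(1, 0.5), _DemoNode(3, 0.9)))
assert not _demo_is_treap(_DemoNode(2, 0.9, _DemoNode(1, 0.5)))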
| 701 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
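# Hedged sketch of the past_key_values geometry assembled in
# generate_dummy_inputs above: each decoder layer contributes a (key, value)
# pair of shape (batch, num_heads, past_len, head_dim). Constants are
# illustrative, not taken from a real checkpoint; guarded to stay import-safe.
if __name__ == "__main__":
    import torch
    _batch, _heads, _past_len, _head_dim = 2, 16, 10, 64
    _demo_past = [
        (torch.zeros(_batch, _heads, _past_len, _head_dim), torch.zeros(_batch, _heads, _past_len, _head_dim))
        for _ in range(4)  # a hypothetical 4-layer model
    ]
    assert _demo_past[0][0].shape == (_batch, _heads, _past_len, _head_dim)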
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = []
__A : Tuple = []
__A : Union[str, Any] = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
__A : List[str] = len(a ) if (len(a ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(a ) , 'Postfix'.center(a ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(a ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(a ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(a ) == 0:
stack.append(a ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(a ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(a ) # push x to stack
print(
x.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
while len(a ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
return "".join(a ) # return Postfix as str
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
__A : List[Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(a ) ):
if infix[i] == "(":
__A : List[str] = ')' # change "(" to ")"
elif infix[i] == ")":
__A : Any = '(' # change ")" to "("
return (infix_2_postfix(''.join(a ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase : List[str] = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase : Union[str, Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
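# Compact, self-contained version (illustrative name, no trace table, no
# parenthesis handling) of the core shunting-yard rule implemented above: pop
# while the incoming operator's priority does not exceed the stack top's.
def _demo_postfix(expr):
    priority = {'^': 3, '*': 2, '/': 2, '%': 2, '+': 1, '-': 1}
    stack, out = [], []
    for x in expr:
        if x.isalnum():
            out.append(x)
        else:
            while stack and priority[x] <= priority[stack[-1]]:
                out.append(stack.pop())
            stack.append(x)
    return ''.join(out + stack[::-1])
assert _demo_postfix('a+b^c') == 'abc^+'
assert _demo_postfix('a*b+c') == 'ab*c+'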
| 702 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
| 77 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase : List[str] = '''true'''
def _SCREAMING_SNAKE_CASE ( a , a=82 , a=16 ) -> Dict:
set_seed(42 )
__A : Tuple = RegressionModel()
__A : Optional[Any] = deepcopy(a )
__A : Optional[int] = RegressionDataset(length=a )
__A : str = DataLoader(a , batch_size=a )
model.to(accelerator.device )
__A : Union[str, Any] = accelerator.prepare(a , a )
return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE ( a , a=False ) -> Optional[Any]:
__A : str = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
__A : Dict = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(a ):
__A : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
with accelerator.main_process_first():
__A : Optional[int] = dataset.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
__A : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
if use_longest:
return tokenizer.pad(a , padding='longest' , return_tensors='pt' )
return tokenizer.pad(a , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return DataLoader(a , shuffle=a , collate_fn=a , batch_size=16 )
def _SCREAMING_SNAKE_CASE ( a , a ) -> Union[str, Any]:
__A : Union[str, Any] = Accelerator(dispatch_batches=a , split_batches=a )
__A : Tuple = get_dataloader(a , not dispatch_batches )
__A : Tuple = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=a )
__A : Optional[Any] = accelerator.prepare(a , a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Dict:
__A : Optional[Any] = []
for batch in dataloader:
__A : List[str] = batch.values()
with torch.no_grad():
__A : List[str] = model(a )
__A : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
    __A , __A : Tuple = [], []
for logit, targ in logits_and_targets:
logits.append(a )
targs.append(a )
    __A , __A : List[Any] = torch.cat(a ), torch.cat(a )
return logits, targs
def _SCREAMING_SNAKE_CASE ( a , a=82 , a=False , a=False , a=16 ) -> Dict:
__A : Tuple = get_basic_setup(a , a , a )
__A : Union[str, Any] = generate_predictions(a , a , a )
assert (
len(a ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(a )}"""
def _SCREAMING_SNAKE_CASE ( a = False , a = False ) -> List[Any]:
__A : Optional[int] = evaluate.load('glue' , 'mrpc' )
__A : int = get_mrpc_setup(a , a )
# First do baseline
__A : Optional[int] = setup['no']
model.to(a )
model.eval()
for batch in dataloader:
batch.to(a )
with torch.inference_mode():
__A : List[Any] = model(**a )
__A : List[str] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=a , references=batch['labels'] )
__A : Dict = metric.compute()
# Then do distributed
__A : Dict = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__A : Optional[Any] = model(**a )
__A : Dict = outputs.logits.argmax(dim=-1 )
__A : Optional[int] = batch['labels']
__A : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=a , references=a )
__A : int = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : List[Any] = Accelerator(split_batches=a , dispatch_batches=a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(a , a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__A : str = Accelerator(split_batches=a , dispatch_batches=a )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
__A : int = Accelerator()
test_torch_metrics(a , 5_12 )
accelerator.state._reset_state()
def _SCREAMING_SNAKE_CASE ( a ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
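# Hedged, single-process illustration of why gather_for_metrics is used above:
# plain gather keeps the duplicate samples Accelerate appends to even out the
# final batch, while gather_for_metrics drops them so totals match the true
# dataset length. The truncation below mimics that effect with plain lists.
def _demo_drop_padding(gathered, true_length):
    return gathered[:true_length]
assert len(_demo_drop_padding(list(range(84)), 82)) == 82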
| 703 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : Union[str, Any] = ''''''
UpperCAmelCase : Optional[int] = ''''''
UpperCAmelCase : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A , __A : List[Any] = get_dataset(a , a )
print('Processing...' )
__A , __A , __A : Optional[Any] = update_image_and_anno(a , a , a )
for index, image in enumerate(a ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__A : Optional[int] = random_chars(32 )
__A : Dict = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
__A : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(a )} with {file_name}""" )
__A : int = []
for anno in new_annos[index]:
__A : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a )
with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[list, list]:
__A : int = []
__A : List[Any] = []
for label_file in glob.glob(os.path.join(a , '*.txt' ) ):
__A : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(a ) as in_file:
__A : Tuple = in_file.readlines()
__A : Dict = os.path.join(a , F"""{label_name}.jpg""" )
__A : Dict = []
for obj_list in obj_lists:
__A : int = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( a , a , a = 1 ) -> tuple[list, list, list]:
__A : int = []
__A : Optional[Any] = []
__A : Dict = []
for idx in range(len(a ) ):
__A : Dict = []
__A : Optional[Any] = img_list[idx]
path_list.append(a )
__A : Union[str, Any] = anno_list[idx]
__A : Optional[Any] = cva.imread(a )
if flip_type == 1:
__A : Any = cva.flip(a , a )
for bbox in img_annos:
__A : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__A : Union[str, Any] = cva.flip(a , a )
for bbox in img_annos:
__A : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(a )
new_imgs_list.append(a )
return new_imgs_list, new_annos_lists, path_list
def _SCREAMING_SNAKE_CASE ( a = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
__A : List[Any] = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
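# Quick numeric check (illustrative box) of the flip arithmetic used above: for
# YOLO-normalized annotations a horizontal flip maps x_center -> 1 - x_center
# and a vertical flip maps y_center -> 1 - y_center, leaving class id, width
# and height untouched.
_demo_box = [0, 0.25, 0.40, 0.10, 0.20]  # class, x_center, y_center, w, h
_demo_hflip = [_demo_box[0], 1 - _demo_box[1], _demo_box[2], _demo_box[3], _demo_box[4]]
assert _demo_hflip == [0, 0.75, 0.40, 0.10, 0.20]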
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : list[list[str]] = [[] for _ in range(a )]
__A : Optional[Any] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1 or len(a ) <= key:
return input_string
for position, character in enumerate(a ):
__A : Any = position % (lowest * 2) # puts it in bounds
__A : Any = min(a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(a )
__A : Any = [''.join(a ) for row in temp_grid]
__A : List[Any] = ''.join(a )
return output_string
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : Dict = []
__A : Union[str, Any] = key - 1
if key <= 0:
raise ValueError('Height of grid can\'t be 0 or negative' )
if key == 1:
return input_string
__A : list[list[str]] = [[] for _ in range(a )] # generates template
for position in range(len(a ) ):
__A : Optional[int] = position % (lowest * 2) # puts it in bounds
__A : Any = min(a , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('*' )
__A : List[Any] = 0
for row in temp_grid: # fills in the characters
__A : str = input_string[counter : counter + len(a )]
grid.append(list(a ) )
counter += len(a )
__A : str = '' # reads as zigzag
for position in range(len(a ) ):
__A : Dict = position % (lowest * 2) # puts it in bounds
__A : Any = min(a , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _SCREAMING_SNAKE_CASE ( a ) -> dict[int, str]:
__A : int = {}
for key_guess in range(1 , len(a ) ): # tries every key
__A : str = decrypt(a , a )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
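# Hand-worked example of the zigzag layout the functions above implement: with
# key=2, "HELLO" alternates between two rails (H L O / E L), giving ciphertext
# "HLOEL". Self-contained re-enactment with illustrative names:
_demo_rails = [[], []]
for _demo_i, _demo_ch in enumerate('HELLO'):
    _demo_rails[_demo_i % 2].append(_demo_ch)
assert ''.join(''.join(r) for r in _demo_rails) == 'HLOEL'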
| 704 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Union[str, Any] = parent
__A : List[str] = batch_size
__A : Optional[int] = seq_length
__A : List[Any] = is_training
__A : Optional[Any] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : Optional[Any] = use_labels
__A : List[str] = vocab_size
__A : Optional[int] = hidden_size
__A : List[Any] = num_hidden_layers
__A : int = num_attention_heads
__A : Dict = intermediate_size
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Dict = type_vocab_size
__A : Any = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : Union[str, Any] = num_choices
__A : str = scope
def UpperCAmelCase_ ( self ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : List[Any] = None
__A : List[Any] = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[str] = LlamaModel(config=_A )
model.to(_A )
model.eval()
__A : Any = model(_A , attention_mask=_A )
__A : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : Tuple = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : Dict = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = LlamaModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Optional[int] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : Tuple = 'single_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : int = 'multi_label_classification'
__A : int = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : List[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase_ ( self , _A ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = ids_tensor([1, 10] , config.vocab_size )
__A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = LlamaModel(_A )
original_model.to(_A )
original_model.eval()
__A : Dict = original_model(_A ).last_hidden_state
__A : int = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : int = {'type': scaling_type, 'factor': 1_0.0}
__A : str = LlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A : Dict = scaled_model(_A ).last_hidden_state
__A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
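        # For intuition (my summary of the assertions above): with
        # max_position_embeddings=512 and factor=10.0, "linear" scaling divides
        # every position index by 10 and therefore perturbs short inputs too,
        # while "dynamic" NTK scaling only activates beyond 512 tokens.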
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        'Logits are not exactly the same; once we fix the instabilities somehow, we will update! Also, this is going to be a `too_slow` test' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
| 77 | 0 |
from __future__ import annotations
from random import choice
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return choice(a )
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
__A : int = random_pivot(a )
# partition based on pivot
# linear time
__A : Tuple = [e for e in lst if e < pivot]
__A : List[Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (possibly the kth element)
    # + big (elements larger than the pivot)
if len(a ) == k - 1:
return pivot
    # the kth element lies among the elements bigger than the pivot
elif len(a ) < k - 1:
return kth_number(a , k - len(a ) - 1 )
    # the kth element lies among the elements smaller than the pivot
else:
return kth_number(a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
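    # Hedged sketch (my addition; assumes the obfuscated helpers above are the
    # canonical random_pivot/kth_number pair, with distinct elements, since
    # duplicates of the pivot are dropped by the partition):
    #   kth_number([2, 1, 3, 4, 5], 3) -> 3   (the 3rd smallest element)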
| 705 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase : str = HfApi()
UpperCAmelCase : List[str] = {}
# fmt: off
UpperCAmelCase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : Tuple = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCAmelCase : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase : Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
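        # Key note (my addition): a model id like "google/ddpm-cifar10-32" maps to
        # the results key "google_ddpm_cifar10_32" via the join/split dance above.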
print(F"""{mod.modelId} has passed successfully!!!""")
| 77 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCAmelCase : Tuple = '''pytorch_model.bin'''
UpperCAmelCase : Any = '''pytorch_model.bin.index.json'''
UpperCAmelCase : int = '''adapter_config.json'''
UpperCAmelCase : Union[str, Any] = '''adapter_model.bin'''
UpperCAmelCase : Dict = '''adapter_model.safetensors'''
UpperCAmelCase : Optional[int] = '''tf_model.h5'''
UpperCAmelCase : Tuple = '''tf_model.h5.index.json'''
UpperCAmelCase : List[Any] = '''model.ckpt'''
UpperCAmelCase : Optional[Any] = '''flax_model.msgpack'''
UpperCAmelCase : Optional[Any] = '''flax_model.msgpack.index.json'''
UpperCAmelCase : int = '''model.safetensors'''
UpperCAmelCase : Optional[Any] = '''model.safetensors.index.json'''
UpperCAmelCase : List[str] = '''config.json'''
UpperCAmelCase : List[str] = '''preprocessor_config.json'''
UpperCAmelCase : Union[str, Any] = FEATURE_EXTRACTOR_NAME
UpperCAmelCase : Optional[Any] = '''generation_config.json'''
UpperCAmelCase : Optional[Any] = '''modelcard.json'''
UpperCAmelCase : Optional[Any] = '''▁'''
UpperCAmelCase : List[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCAmelCase : Optional[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCAmelCase : Optional[int] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCAmelCase : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
if version.parse(a ) < version.parse(a ):
if "dev" in min_version:
__A : str = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
__A : Optional[Any] = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
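# Hedged usage sketch (my addition): example scripts typically call the guard
# above right after their imports, e.g.
#   check_min_version("4.21.0.dev0")  # hypothetical version; canonical name for the function above
# and it raises ImportError when the installed transformers build is too old.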
| 706 |
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : Union[str, Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : List[Any] = 0
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : Dict = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A : Optional[int] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the maximum of the current pooling window
            __A : Tuple = np.max(arr[i : i + size, j : j + size] )
            # slide the window right by `stride` columns
            j += stride
            mat_j += 1
        # slide the window down by `stride` rows
i += stride
mat_i += 1
# reset the column index to 0
__A : List[str] = 0
__A : Union[str, Any] = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : List[Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : Dict = 0
__A : str = 0
__A : Tuple = 0
__A : Optional[int] = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A : Any = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the (integer) average of the current pooling window
            __A : Tuple = int(np.average(arr[i : i + size, j : j + size] ) )
            # slide the window right by `stride` columns
            j += stride
            mat_j += 1
        # slide the window down by `stride` rows
i += stride
mat_i += 1
# reset the column index to 0
__A : Dict = 0
__A : int = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
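    # Hedged numeric sketch (my addition), using the canonical maxpooling /
    # avgpooling names the demo below already assumes for the functions above:
    #   maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2) -> [[ 5.  7.], [13. 15.]]
    #   avgpooling(np.arange(16).reshape(4, 4), size=2, stride=2) -> [[ 2.  4.], [10. 12.]]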
# Loading the image
UpperCAmelCase : int = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : int = len(a )
__A : int = len(a )
__A : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
__A : list = []
for char_count in range(a ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(a )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
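    # Expected output of the call above (my worked example): letters are
    # interleaved one at a time and the leftover tail is appended, giving "AXBYZ".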
| 707 |
from __future__ import annotations
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( a , a , a , a = 1_00 , ) -> float:
__A : Any = x_start
__A : List[str] = fnc(a )
__A : Optional[Any] = 0.0
for _ in range(a ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
__A : Any = (x_end - x_start) / steps + xa
__A : List[str] = fnc(a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__A : Any = xa
__A : Dict = fxa
return area
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
UpperCAmelCase : Tuple = 10
while i <= 10_00_00:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
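    # Convergence check (my arithmetic): the loop approaches the exact unsigned
    # area, 1376/12 + 198 ≈ 312.67, as the step count grows.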
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''trocr'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _A=50265 , _A=1024 , _A=12 , _A=16 , _A=4096 , _A="gelu" , _A=512 , _A=0.1 , _A=0.0 , _A=0.0 , _A=2 , _A=0.0_2 , _A=0.0 , _A=True , _A=False , _A=True , _A=True , _A=1 , _A=0 , _A=2 , **_A , ):
__A : str = vocab_size
__A : Union[str, Any] = d_model
__A : str = decoder_layers
__A : Dict = decoder_attention_heads
__A : Optional[int] = decoder_ffn_dim
__A : Tuple = activation_function
__A : Optional[int] = max_position_embeddings
__A : Tuple = dropout
__A : int = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : str = init_std
__A : List[Any] = decoder_layerdrop
__A : List[Any] = use_cache
__A : Any = scale_embedding
__A : Optional[int] = use_learned_position_embeddings
__A : Tuple = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
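# Hedged usage sketch (my addition): the class above mirrors TrOCRConfig, so a
# default decoder config could be built and inspected roughly like this:
#   config = TrOCRConfig()        # canonical name for the class above
#   config.to_dict()["d_model"]   # -> 1024, per the defaults in __init__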
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Optional[Any] = rabinMiller.generate_large_prime(a )
print('Generating prime q...' )
__A : Union[str, Any] = rabinMiller.generate_large_prime(a )
__A : Tuple = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__A : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(a , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__A : Any = cryptoMath.find_mod_inverse(a , (p - 1) * (q - 1) )
__A : Dict = (n, e)
__A : Dict = (n, d)
return (public_key, private_key)
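# RSA sanity note (my addition): with public key (n, e) and private key (n, d),
# pow(pow(m, e, n), d, n) == m for any message m < n, because e*d ≡ 1
# (mod (p - 1) * (q - 1)) by the modular inverse computed above.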
def _SCREAMING_SNAKE_CASE ( a , a ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A , __A : Optional[int] = generate_key(a )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 77 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = '''▁'''
UpperCAmelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase : Any = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase : Dict = {
'''ernie-m-base''': 5_14,
'''ernie-m-large''': 5_14,
}
UpperCAmelCase : Dict = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ["input_ids"]
UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Any = RESOURCE_FILES_NAMES
def __init__( self , _A , _A=None , _A=False , _A="utf8" , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A = None , **_A , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
__A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , vocab_file=_A , encoding=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__A : List[Any] = do_lower_case
__A : Optional[int] = sentencepiece_model_ckpt
__A : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__A : Any = self.load_vocab(filepath=_A )
else:
__A : Tuple = {self.sp_model.id_to_piece(_A ): id for id in range(self.sp_model.get_piece_size() )}
__A : Union[str, Any] = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase_ ( self , _A ):
if text is None:
return None
__A : str = self.tokenize(_A )
__A : Any = '', []
for i, ch in enumerate(_A ):
if ch in self.SP_CHAR_MAPPING:
__A : Optional[int] = self.SP_CHAR_MAPPING.get(_A )
else:
__A : Optional[int] = unicodedata.normalize('NFKC' , _A )
if self.is_whitespace(_A ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_A ) )
__A : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
__A : str = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__A : Dict = token[1:]
__A : Optional[Any] = text[offset:].index(_A ) + offset
__A : Dict = start + len(_A )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__A : Union[str, Any] = end
return token_mapping
@property
def UpperCAmelCase_ ( self ):
return len(self.vocab )
def UpperCAmelCase_ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
__A : Union[str, Any] = self.__dict__.copy()
__A : Dict = None
return state
def __setstate__( self , _A ):
__A : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A : Dict = {}
__A : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase_ ( self , _A ):
return "".join((self.SP_CHAR_MAPPING.get(_A , _A ) for c in text) )
def UpperCAmelCase_ ( self , _A , _A=False , _A=64 , _A=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
__A : Any = True
if self.sp_model_kwargs.get('alpha' ) is not None:
__A : Tuple = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
__A : List[Any] = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
__A : int = self.sp_model.EncodeAsPieces(_A )
else:
__A : List[str] = self.sp_model.SampleEncodeAsPieces(_A , _A , _A )
__A : Optional[int] = []
for pi, piece in enumerate(_A ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_A ) and pi != 0:
new_pieces.append(_A )
continue
else:
continue
__A : Tuple = 0
for i, chunk in enumerate(_A ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_A ) or self.is_punct(_A ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_A )
__A : Optional[int] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__A : Optional[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__A : int = i
if len(_A ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase_ ( self , _A ):
__A : str = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self , _A ):
__A : str = self.convert_ids_to_tokens(_A )
__A : Union[str, Any] = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self , _A ):
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self , _A ):
return self.reverse_vocab.get(_A , self.unk_token )
def UpperCAmelCase_ ( self , _A , _A=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
__A : Union[str, Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase_ ( self , _A , _A=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase_ ( self , _A , _A=None , _A=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_A ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_A ) + 1) + [1] * (len(_A ) + 3)
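        # Worked example (my addition): for a pair with 3 and 2 ids the layout
        # [CLS] A A A [SEP] [SEP] B B [SEP] yields [0]*4 + [1]*5 == 9 type ids.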
def UpperCAmelCase_ ( self , _A ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase_ ( self , _A ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase_ ( self , _A ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase_ ( self , _A ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_A ) == 1:
__A : Union[str, Any] = unicodedata.category(_A )
if cat == "Zs":
return True
return False
def UpperCAmelCase_ ( self , _A ):
__A : str = {}
with io.open(_A , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_A ):
__A : Any = line.rstrip('\n' )
__A : Union[str, Any] = int(_A )
return token_to_idx
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Tuple = 0
if os.path.isdir(_A ):
__A : str = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__A : Union[str, Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_A , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
__A : str = token_index
writer.write(token + '\n' )
index += 1
__A : Union[str, Any] = os.path.join(_A , 'sentencepiece.bpe.model' )
with open(_A , 'wb' ) as fi:
__A : str = self.sp_model.serialized_model_proto()
fi.write(_A )
return (vocab_file,)
| 709 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = ProphetNetTokenizer
UpperCamelCase : Tuple = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = 'UNwant\u00E9d,running'
__A : List[str] = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase_ ( self ):
__A : Tuple = self.tokenizer_class(self.vocab_file )
__A : List[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self ):
__A : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCAmelCase_ ( self ):
__A : List[str] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : List[Any] = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__A : Optional[int] = {}
for i, token in enumerate(_A ):
__A : Tuple = i
__A : Tuple = WordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__A : str = tokenizer(_A , padding=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
__A : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Any = tokenizer.encode('sequence builders' , add_special_tokens=_A )
__A : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_A )
__A : str = tokenizer.build_inputs_with_special_tokens(_A )
__A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = '''mra'''
def __init__( self , _A=50265 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=1 , _A=0.0_2 , _A=1e-5 , _A="absolute" , _A=4 , _A="full" , _A=0 , _A=0 , _A=1 , _A=0 , _A=2 , **_A , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__A : List[str] = vocab_size
__A : str = max_position_embeddings
__A : Optional[Any] = hidden_size
__A : List[Any] = num_hidden_layers
__A : str = num_attention_heads
__A : Optional[Any] = intermediate_size
__A : List[str] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Dict = initializer_range
__A : List[str] = type_vocab_size
__A : Dict = layer_norm_eps
__A : int = position_embedding_type
__A : Optional[Any] = block_per_row
__A : int = approx_mode
__A : str = initial_prior_first_n_blocks
__A : Tuple = initial_prior_diagonal_n_blocks
| 710 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = BertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__A : Any = getattr(_A , normalizer_state.pop('type' ) )
__A : Union[str, Any] = do_lower_case
__A : Optional[int] = strip_accents
__A : List[Any] = tokenize_chinese_chars
__A : int = normalizer_class(**_A )
__A : Union[str, Any] = do_lower_case
def UpperCAmelCase_ ( self , _A , _A=None ):
__A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = [self.sep_token_id]
__A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
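

# --- Illustration (added): a minimal, dependency-free sketch of the segment-id
# logic implemented by the tokenizer class above. Tokens of the first sequence
# (plus [CLS]/[SEP]) get type 0; the second sequence (plus its closing [SEP])
# gets type 1. The token ids below are hypothetical, not from a real vocab.
def _token_type_ids_sketch(token_ids_0, token_ids_1=None, cls_id=101, sep_id=102):
    cls, sep = [cls_id], [sep_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

assert _token_type_ids_sketch([7, 8], [9]) == [0, 0, 0, 0, 1, 1]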
| 77 | 0 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # O(n^2): for each element, linearly scan the remainder of the array for
    # the first strictly larger value; -1 marks "no greater element".
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same O(n^2) idea, written with enumerate() and slicing.
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    # O(n) monotonic stack: traverse from the right, popping stack values that
    # are <= the current element; whatever remains on top is the next greater
    # element for the current index.
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
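

# Worked micro-example (added sketch) of the monotonic stack, scanning
# [2, 1, 3] from the right:
#   index 2 (value 3): stack empty         -> result[2] = -1; stack = [3]
#   index 1 (value 1): top 3 > 1           -> result[1] = 3;  stack = [3, 1]
#   index 0 (value 2): pop 1 (<= 2), 3 > 2 -> result[0] = 3;  stack = [3, 2]
assert next_greatest_element([2, 1, 3]) == [3, 3, -1]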
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 711 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ):
debug_launcher(test_ops.main )
| 77 | 0 |
def solution() -> int:
    """Project Euler 9: return a * b * c for the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
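

# Cross-check (added sketch): the unique triplet is the scaled 3-4-5 triangle
# (200, 375, 425), so solution() should return 200 * 375 * 425 == 31_875_000.
assert 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000
assert 200 * 375 * 425 == 31_875_000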
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
__A : int = 16
__A : Optional[int] = len(_A )
__A : int = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
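

# --- Illustration (added): standalone sketch of the shape bookkeeping the
# nested-queries test above relies on. With nested text queries the processor
# flattens to one row per query and pads the batch to the longest query list,
# so input_ids has shape (batch_size * num_max_text_queries, seq_length).
# The seq_length of 16 mirrors the test; everything else is plain Python.
input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)                             # 2
num_max_text_queries = max(len(t) for t in input_texts)   # 2
seq_length = 16
assert (batch_size * num_max_text_queries, seq_length) == (4, 16)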
| 77 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=4 , ):
__A : Optional[int] = parent
__A : List[Any] = batch_size
__A : Tuple = seq_length
__A : Optional[Any] = is_training
__A : str = use_attention_mask
__A : Union[str, Any] = use_token_type_ids
__A : Union[str, Any] = use_labels
__A : List[str] = vocab_size
__A : List[Any] = hidden_size
__A : List[Any] = num_hidden_layers
__A : Any = num_attention_heads
__A : str = intermediate_size
__A : str = hidden_act
__A : Dict = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Union[str, Any] = type_sequence_label_size
__A : Tuple = initializer_range
__A : str = num_choices
def UpperCAmelCase_ ( self ):
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_attention_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__A : Union[str, Any] = None
if self.use_token_type_ids:
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self ):
__A : List[str] = self.prepare_config_and_inputs()
__A : str = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.prepare_config_and_inputs()
__A : Optional[Any] = config_and_inputs
__A : Dict = True
__A : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = True
UpperCamelCase : Any = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = FlaxBertModelTester(self )
@slow
def UpperCAmelCase_ ( self ):
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
__A : List[str] = FlaxBertModel.from_pretrained('bert-base-cased' )
__A : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
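

# --- Illustration (added): shapes of the random inputs the tester above
# builds, reproduced with numpy only. ids_tensor / random_attention_mask are
# internal test utilities; this sketch just mirrors their output shapes.
import numpy as np

batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = np.random.randint(0, vocab_size, size=(batch_size, seq_length))
attention_mask = (np.random.rand(batch_size, seq_length) < 0.5).astype(np.int8)
assert input_ids.shape == attention_mask.shape == (13, 7)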
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
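

# --- Illustration (added): how the '*' wildcard in MAPPING gets resolved.
# The layer index is read out of the fairseq parameter name and substituted
# into the HF key. The names below are illustrative, not real checkpoint keys.
_name = "encoder.layers.3.self_attn.linear_q.weight"
_key = "self_attn.linear_q"
_mapped_key = "encoder.layers.*.self_attn.linear_q"
_layer_index = _name.split(_key)[0].split(".")[-2]  # -> "3"
assert _mapped_key.replace("*", _layer_index) == "encoder.layers.3.self_attn.linear_q"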
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted key down the HF module tree, then copy the tensor into
    # the attribute selected by weight_type (or into the pointer itself).
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
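

# Usage sketch (added): the getattr walk above is plain attribute traversal
# over a module tree. A tiny self-contained example:
import torch.nn as nn

_m = nn.Module()
_m.encoder = nn.Module()
_m.encoder.layer_norm = nn.LayerNorm(4)
_pointer = _m
for _attribute in "encoder.layer_norm.weight".split("."):
    _pointer = getattr(_pointer, _attribute)
assert tuple(_pointer.shape) == (4,)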
def recursively_load_weights(fairseq_model, hf_model, is_headless):
__A : Any = []
__A : Optional[int] = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__A : int = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__A : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : Optional[Any] = True
if "*" in mapped_key:
__A : str = name.split(a )[0].split('.' )[-2]
__A : int = mapped_key.replace('*' , a )
if "pos_bias_u" in name:
__A : Optional[int] = None
elif "pos_bias_v" in name:
__A : Dict = None
elif "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Dict = 'weight_v'
elif "bias" in name:
__A : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : int = 'weight'
elif "running_mean" in name:
__A : str = 'running_mean'
elif "inv_freq" in name:
__A : List[Any] = 'inv_freq'
elif "running_var" in name:
__A : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[Any] = 'num_batches_tracked'
else:
__A : List[str] = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
__A : str = full_name.split('conv_layers.' )[-1]
__A : str = name.split('.' )
__A : Dict = int(items[0] )
__A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__A : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__A : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if config_path is not None:
__A : Tuple = WavaVecaConformerConfig.from_pretrained(a , hidden_act='swish' )
else:
__A : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__A : Dict = 'rotary'
if is_finetuned:
if dict_path:
__A : Dict = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__A : int = target_dict.pad_index
__A : List[Any] = target_dict.bos_index
__A : Any = target_dict.eos_index
__A : Dict = len(target_dict.symbols )
__A : Optional[Any] = os.path.join(a , 'vocab.json' )
if not os.path.isdir(a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a ) )
return
os.makedirs(a , exist_ok=a )
__A : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__A : int = 0
__A : Optional[Any] = 1
with open(a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(a , a )
__A : Optional[Any] = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a , )
__A : Tuple = True if config.feat_extract_norm == 'layer' else False
__A : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__A : Optional[int] = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__A : List[Any] = WavaVecaConformerForCTC(a )
else:
__A : List[Any] = WavaVecaConformerForPreTraining(a )
if is_finetuned:
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__A : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__A : str = fairseq.tasks.setup_task(a )
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__A : Tuple = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
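
# Example invocation (added sketch; all paths are placeholders). The
# positional arguments mirror the argparse wiring above, with the last one
# equal to `not args.not_finetuned`, i.e. True for a fine-tuned model:
#
#   convert_wavaveca_conformer_checkpoint(
#       "/path/to/wav2vec2_conformer.pt",  # checkpoint_path
#       "/path/to/hf_output_dir",          # pytorch_dump_folder_path
#       None,                              # config_path
#       "/path/to/dict.ltr.txt",           # dict_path
#       True,                              # is_finetuned
#   )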
| 77 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
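

# --- Illustration (added): standalone sketch of the slice comparison used in
# the fast test above: take the bottom-right 3x3 patch of the last channel of
# the first image and compare it elementwise against an expected slice.
import numpy as np

_image = np.full((20, 32, 32, 3), 0.00039216, dtype=np.float32)
_image_slice = _image[0, -3:, -3:, -1]
_expected_slice = np.full(9, 0.00039216, dtype=np.float32)
assert np.abs(_image_slice.flatten() - _expected_slice).max() < 1e-2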
| 714 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
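

# --- Illustration (added): a hypothetical concrete command showing how the
# two abstract hooks above are typically used together: register_subcommand
# wires an argparse subparser to a factory, and run() does the actual work.
# EnvCommand and its subcommand name are invented for this sketch.
from argparse import ArgumentParser

class EnvCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("env", help="print environment info")
        parser.set_defaults(factory=lambda args: EnvCommand())

    def run(self):
        print("collecting environment info...")

_root = ArgumentParser("cli")
EnvCommand.register_subcommand(_root.add_subparsers())
_args = _root.parse_args(["env"])
_args.factory(_args).run()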
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
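

# --- Illustration (added): _LazyModule defers the heavy imports above until
# an attribute is first accessed. A minimal version of the same idea (PEP 562
# module-level __getattr__), independent of the transformers implementation;
# the json mapping below is a stand-in for _import_structure:
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}  # module -> exported names
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(name)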
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
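

# --- Illustration (added): the same deprecation-shim pattern in miniature.
# The class names are hypothetical; the point is warn-then-delegate via
# inheritance, exactly as the wrapper above does.
import warnings as _warnings

class _NewProcessor:
    def __init__(self, size=224):
        self.size = size

class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        _warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)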
| 716 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 77 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> List[str]:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Optional[Any]:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Optional[Any]:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Optional[int]:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Dict:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Optional[int]:
requires_backends(a , ['torch'] )
def _SCREAMING_SNAKE_CASE ( *a , **a ) -> Tuple:
requires_backends(a , ['torch'] )
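
# Sketch (added): roughly what requires_backends amounts to, simplified. The
# real transformers helper has richer error messages; _backend_available is a
# stand-in defined only for this illustration.
import importlib.util

def _backend_available(backend):
    return importlib.util.find_spec(backend) is not None

def _requires_backends_sketch(obj, backends):
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if not _backend_available(b)]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s) to be installed.")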
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Dict = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
class _A( metaclass=snake_case__ ):
"""simple docstring"""
UpperCamelCase : Optional[int] = ['''torch''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
@classmethod
def UpperCAmelCase_ ( cls , *_A , **_A ):
requires_backends(cls , ['torch'] )
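# Note (added): each placeholder class above intentionally shares the same body;
# constructing one (or calling either classmethod) immediately invokes
# requires_backends(...), which raises an ImportError telling the user that the
# 'torch' backend must be installed before the real class can be used.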
| 717 |
from __future__ import annotations
import math
def default_matrix_multiplication( a , b ) -> list:
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('Matrices are not 2x2' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition( matrix_a , matrix_b ) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction( matrix_a , matrix_b ) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix( a ) -> tuple[list, list, list, list]:
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('Odd matrices are not supported!' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions( matrix ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix( matrix ) -> None:
    print('\n'.join(str(line ) for line in matrix ) )
def actual_strassen( matrix_a , matrix_b ) -> list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen( matrix_a , matrix_b ) -> list:
    if matrix_dimensions(matrix_a )[1] != matrix_dimensions(matrix_b )[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            F"""Matrix A: {matrix_a}\n"""
            F"""Matrix B: {matrix_b}"""
        )
        raise Exception(msg )
    dimension_a = matrix_dimensions(matrix_a )
    dimension_b = matrix_dimensions(matrix_b )
    if dimension_a[0] == dimension_a[1] and dimension_b[0] == dimension_b[1]:
        return [matrix_a, matrix_b]
    maximum = max(*dimension_a , *dimension_b )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix_a = matrix_a
    new_matrix_b = matrix_b
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension_a[0]:
            for _ in range(dimension_a[1] , maxim ):
                new_matrix_a[i].append(0 )
        else:
            new_matrix_a.append([0] * maxim )
        if i < dimension_b[0]:
            for _ in range(dimension_b[1] , maxim ):
                new_matrix_b[i].append(0 )
        else:
            new_matrix_b.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix_a , new_matrix_b )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension_a[0]:
            for _ in range(dimension_b[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix_a = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix_b = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix_a, matrix_b))
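    # Added cross-check (sketch, not in the original script): compare strassen()
    # with a naive O(n^3) multiply on a small non-square pair. Fresh copies are
    # passed because the zero-padding in strassen() mutates its arguments in place.
    import copy
    a_small = [[1, 2, 0, 1], [0, 1, 3, 2], [4, 0, 1, 1]]
    b_small = [[1, 2], [0, 1], [2, 0], [1, 3]]
    naive = [
        [sum(a_small[i][k] * b_small[k][j] for k in range(len(b_small ) ) ) for j in range(len(b_small[0] ) )]
        for i in range(len(a_small ) )
    ]
    assert strassen(copy.deepcopy(a_small ) , copy.deepcopy(b_small ) ) == naive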
| 77 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def UpperCAmelCase_ ( self , _A=0 ):
__A : List[str] = floats_tensor((1, 3, 128, 128) , rng=random.Random(_A ) )
__A : Optional[Any] = np.random.RandomState(_A )
__A : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.7_5,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = self.get_dummy_inputs()
__A : List[str] = pipe(**_A ).images
__A : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__A : Any = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
__A : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A )
pipe.set_progress_bar_config(disable=_A )
__A : Optional[Any] = self.get_dummy_inputs()
__A : Optional[int] = pipe(**_A ).images
__A : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__A : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
__A : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
# warmup pass to apply optimizations
__A : str = pipe(**self.get_dummy_inputs() )
__A : Optional[int] = self.get_dummy_inputs()
__A : List[Any] = pipe(**_A ).images
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__A : Dict = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
__A : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = self.get_dummy_inputs()
__A : Any = pipe(**_A ).images
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__A : Union[str, Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
__A : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A : List[str] = self.get_dummy_inputs()
__A : int = pipe(**_A ).images
__A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__A : List[str] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase_ ( self ):
__A : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A : Optional[Any] = self.get_dummy_inputs()
__A : List[str] = pipe(**_A ).images
__A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__A : int = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self ):
__A : int = ort.SessionOptions()
__A : List[Any] = False
return options
def UpperCAmelCase_ ( self ):
__A : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__A : Optional[int] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__A : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__A : int = 'A fantasy landscape, trending on artstation'
__A : List[str] = np.random.RandomState(0 )
__A : Optional[Any] = pipe(
prompt=_A , image=_A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type='np' , )
__A : int = output.images
__A : Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__A : List[str] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__A : str = init_image.resize((768, 512) )
__A : Tuple = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
__A : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__A : Optional[int] = 'A fantasy landscape, trending on artstation'
__A : Tuple = np.random.RandomState(0 )
__A : int = pipe(
prompt=_A , image=_A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_A , output_type='np' , )
__A : Tuple = output.images
__A : Optional[int] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__A : Tuple = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 718 |
def infix_2_postfix( infix ) -> str:
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8 ) , 'Stack'.center(print_width ) , 'Postfix'.center(print_width ) , sep=' | ' , )
    print('-' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ' '.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , )  # Output in tabular format
    return ''.join(post_fix )  # return Postfix as str
def infix_2_prefix( infix ) -> str:
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ' )  # Input an Infix equation
    Infix = ''.join(Infix.split() )  # Remove spaces from the input
    print('\n\t' , Infix , '(Infix) -> ' , infix_2_prefix(Infix ) , '(Prefix)' )
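    # Added self-check (sketch, not in the original script): the prefix form of
    # "a+b*c" is "+a*bc"; note that infix_2_prefix also prints its working table.
    assert infix_2_prefix('a+b*c' ) == '+a*bc'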
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = 'convnextv2'
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
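# Added usage sketch (not part of the upstream file): instantiate the config
# with defaults and inspect the derived stage names.
if __name__ == "__main__":
    config = ConvNextV2Config()
    print(config.model_type , config.hidden_sizes , config.stage_names )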
| 719 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''mask2former'''
UpperCamelCase : Any = ['''swin''']
UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , _A = None , _A = 256 , _A = 256 , _A = 256 , _A = 1024 , _A = "relu" , _A = 6 , _A = 10 , _A = 8 , _A = 0.0 , _A = 2048 , _A = False , _A = False , _A = 4 , _A = 255 , _A = 100 , _A = 0.1 , _A = 2.0 , _A = 5.0 , _A = 5.0 , _A = 12544 , _A = 3.0 , _A = 0.7_5 , _A = 0.0_2 , _A = 1.0 , _A = True , _A = [4, 8, 16, 32] , _A = None , **_A , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__A : Optional[int] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Dict = backbone_config.pop('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : Optional[int] = backbone_config
__A : Optional[Any] = feature_size
__A : Any = mask_feature_size
__A : Optional[Any] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : Optional[Any] = activation_function
__A : List[Any] = encoder_layers
__A : Union[str, Any] = decoder_layers
__A : Dict = num_attention_heads
__A : Tuple = dropout
__A : Dict = dim_feedforward
__A : Tuple = pre_norm
__A : Dict = enforce_input_projection
__A : Optional[int] = common_stride
__A : Optional[Any] = ignore_value
__A : str = num_queries
__A : List[Any] = no_object_weight
__A : List[str] = class_weight
__A : List[Any] = mask_weight
__A : List[Any] = dice_weight
__A : Tuple = train_num_points
__A : Optional[Any] = oversample_ratio
__A : Union[str, Any] = importance_sample_ratio
__A : Union[str, Any] = init_std
__A : int = init_xavier_std
__A : Union[str, Any] = use_auxiliary_loss
__A : Union[str, Any] = feature_strides
__A : List[Any] = output_auxiliary_logits
__A : Optional[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
return cls(
backbone_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = copy.deepcopy(self.__dict__ )
__A : List[Any] = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
| 77 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase ):
    """simple docstring"""
    def test_swish( self ):
        act = get_activation('swish' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        act = get_activation('silu' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        act = get_activation('mish' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        act = get_activation('gelu' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
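# Added usage sketch (not part of the upstream test file): get_activation maps
# a config string to an nn.Module instance.
if __name__ == "__main__":
    act = get_activation('gelu' )
    print(act(torch.zeros(2 , 3 ) ).shape )  # torch.Size([2, 3])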
| 720 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''conditional_detr'''
UpperCamelCase : int = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _A=True , _A=None , _A=3 , _A=300 , _A=6 , _A=2048 , _A=8 , _A=6 , _A=2048 , _A=8 , _A=0.0 , _A=0.0 , _A=True , _A="relu" , _A=256 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1.0 , _A=False , _A="sine" , _A="resnet50" , _A=True , _A=False , _A=2 , _A=5 , _A=2 , _A=1 , _A=1 , _A=2 , _A=5 , _A=2 , _A=0.2_5 , **_A , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__A : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__A : Tuple = backbone_config.get('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[Any] = config_class.from_dict(_A )
__A : Tuple = use_timm_backbone
__A : List[str] = backbone_config
__A : Dict = num_channels
__A : int = num_queries
__A : int = d_model
__A : str = encoder_ffn_dim
__A : List[str] = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Union[str, Any] = decoder_ffn_dim
__A : List[Any] = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : Any = dropout
__A : Any = attention_dropout
__A : int = activation_dropout
__A : Optional[int] = activation_function
__A : Union[str, Any] = init_std
__A : Union[str, Any] = init_xavier_std
__A : Optional[Any] = encoder_layerdrop
__A : int = decoder_layerdrop
__A : List[str] = encoder_layers
__A : str = auxiliary_loss
__A : Union[str, Any] = position_embedding_type
__A : Optional[int] = backbone
__A : List[str] = use_pretrained_backbone
__A : List[Any] = dilation
# Hungarian matcher
__A : List[str] = class_cost
__A : Optional[int] = bbox_cost
__A : Dict = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Union[str, Any] = dice_loss_coefficient
__A : List[Any] = cls_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
__A : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Dict = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-5
@property
def UpperCAmelCase_ ( self ):
return 12
| 77 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
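# Added usage sketch (not part of the upstream file; downloads the
# bert-base-uncased tokenizer files on first use):
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased' )
    print(tokenizer('hello world' )['input_ids'] )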
| 721 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _A( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
__A : List[str] = nn.Linear(3 , 4 )
__A : Optional[Any] = nn.BatchNormad(4 )
__A : List[Any] = nn.Linear(4 , 5 )
def UpperCAmelCase_ ( self , _A ):
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , model.state_dict() )
__A : str = os.path.join(_A , 'index.json' )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__A : Optional[int] = os.path.join(_A , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase_ ( self ):
__A : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__A : Tuple = torch.randn(2 , 3 , dtype=_A )
with TemporaryDirectory() as tmp_dir:
__A : int = offload_weight(_A , 'weight' , _A , {} )
__A : Union[str, Any] = os.path.join(_A , 'weight.dat' )
self.assertTrue(os.path.isfile(_A ) )
self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A ).split('.' )[1]}} )
__A : List[str] = load_offloaded_weight(_A , index['weight'] )
self.assertTrue(torch.equal(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : int = ModelForTest()
__A : Union[str, Any] = model.state_dict()
__A : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
__A : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
__A : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
__A : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : Optional[int] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
# Duplicates are removed
__A : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
def UpperCAmelCase_ ( self ):
__A : Dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
__A : str = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2} )
__A : Optional[Any] = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
__A : Any = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2} )
| 77 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=3 , _A=224 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , ):
__A : int = size if size is not None else {'height': 18, 'width': 18}
__A : int = parent
__A : int = batch_size
__A : Tuple = num_channels
__A : List[Any] = image_size
__A : str = min_resolution
__A : Optional[Any] = max_resolution
__A : str = do_resize
__A : Any = size
__A : Any = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
def UpperCAmelCase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = ViTImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : str = EfficientFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processor
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Tuple = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__A : int = image_processor(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processor
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : List[str] = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__A : Union[str, Any] = image_processor(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processor
__A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
__A : List[str] = image_processor(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
__A : Any = data
def __iter__( self ):
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE ( a=True ) -> Any:
__A : List[Any] = Accelerator(even_batches=a )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a , a = False ) -> str:
if iterable:
__A : int = DummyIterableDataset(torch.as_tensor(range(a ) ) )
else:
__A : Optional[Any] = TensorDataset(torch.as_tensor(range(a ) ) )
__A : Optional[Any] = DataLoader(a , batch_size=a )
__A : Optional[int] = accelerator.prepare(a )
return dl
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a , ) -> Union[str, Any]:
__A : Optional[int] = create_dataloader(accelerator=a , dataset_size=a , batch_size=a )
__A : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : str = create_accelerator(even_batches=a )
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : Optional[Any] = create_accelerator(even_batches=a )
__A : str = torch.nn.Linear(1 , 1 )
__A : Optional[int] = accelerator.prepare(a )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a ):
__A : Dict = ddp_model(batch[0].float() )
__A : List[str] = output.sum()
loss.backward()
batch_idxs.append(a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : int = True
__A : Union[str, Any] = False
__A : Optional[int] = create_accelerator(even_batches=a )
__A : int = torch.nn.Linear(1 , 1 )
__A : List[Any] = accelerator.prepare(a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : List[str] = train_dl.batch_sampler.even_batches
__A : Dict = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Any = True
__A : List[Any] = False
__A : Tuple = create_accelerator(even_batches=a )
__A : List[str] = torch.nn.Linear(1 , 1 )
__A : Optional[Any] = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : Tuple = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Any = create_accelerator()
__A : Union[str, Any] = torch.nn.Linear(1 , 1 )
__A : str = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
__A : int = accelerator.state.distributed_type
__A : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a )
__A : str = original_state
if __name__ == "__main__":
main()
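# Note (added): this script asserts accelerator.num_processes == 2, so it is
# meant to be run with two workers, e.g. via
#   accelerate launch --num_processes 2 <this_script>.py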
| 77 | 0 |
def fizz_buzz( number , iterations ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
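    # Added self-check (sketch, not in the original): note the trailing space
    # after every entry, including the last one.
    assert fizz_buzz(1 , 15 ) == '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '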
| 701 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
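            # The mask now spans past_key_values_length + seqlen positions,
            # matching the 'past_sequence + sequence' dynamic axis declared above.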
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
| 77 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[Any] = KandinskyVaaInpaintPipeline
UpperCamelCase : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
UpperCamelCase : Optional[int] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
UpperCamelCase : Dict = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : Optional[Any] = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 100
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : str = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__A : Optional[int] = UNetaDConditionModel(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self ):
__A : str = self.dummy_unet
__A : Dict = self.dummy_movq
__A : Any = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='epsilon' , thresholding=_A , )
__A : int = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
__A : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
__A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
__A : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
__A : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : List[Any] = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) )
# create mask
__A : Any = np.ones((64, 64) , dtype=np.floataa )
__A : Union[str, Any] = 0
if str(_A ).startswith('mps' ):
__A : int = torch.manual_seed(_A )
else:
__A : str = torch.Generator(device=_A ).manual_seed(_A )
__A : Dict = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = 'cpu'
__A : List[Any] = self.get_dummy_components()
__A : Union[str, Any] = self.pipeline_class(**_A )
__A : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Dict = pipe(**self.get_dummy_inputs(_A ) )
__A : List[str] = output.images
__A : Any = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__A : Any = image[0, -3:, -3:, -1]
__A : Dict = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__A : Any = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
__A : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__A : List[str] = np.ones((768, 768) , dtype=np.floataa )
__A : Optional[int] = 0
__A : int = 'a hat'
__A : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__A : Any = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
__A : Optional[int] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__A : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
__A : Union[str, Any] = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__A : Any = pipeline(
image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
__A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
| 702 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
| 77 | 0 |
import os
import sys
import unittest
UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCAmelCase : Union[str, Any] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
UpperCAmelCase : Dict = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : List[str] = get_test_to_tester_mapping(_A )
__A : Tuple = get_test_to_tester_mapping(_A )
__A : List[Any] = {'BertModelTest': 'BertModelTester'}
__A : Optional[Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : Any = get_model_to_test_mapping(_A )
__A : str = get_model_to_test_mapping(_A )
__A : Dict = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__A : Optional[int] = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def UpperCAmelCase_ ( self ):
__A : str = get_model_to_tester_mapping(_A )
__A : Optional[Any] = get_model_to_tester_mapping(_A )
__A : Optional[int] = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__A : str = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
| 703 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : Union[str, Any] = ''''''
UpperCAmelCase : Optional[int] = ''''''
UpperCAmelCase : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A , __A : List[Any] = get_dataset(a , a )
print('Processing...' )
__A , __A , __A : Optional[Any] = update_image_and_anno(a , a , a )
for index, image in enumerate(a ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__A : Optional[int] = random_chars(32 )
__A : Dict = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
__A : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(a )} with {file_name}""" )
__A : int = []
for anno in new_annos[index]:
__A : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a )
with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[list, list]:
__A : int = []
__A : List[Any] = []
for label_file in glob.glob(os.path.join(a , '*.txt' ) ):
__A : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(a ) as in_file:
__A : Tuple = in_file.readlines()
__A : Dict = os.path.join(a , F"""{label_name}.jpg""" )
__A : Dict = []
for obj_list in obj_lists:
__A : int = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
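# Illustrative note: each label line appears to use the YOLO convention
# "<class> <x_center> <y_center> <width> <height>" with coordinates normalised
# to [0, 1], so e.g. "0 0.25 0.40 0.10 0.20" parses to [0, 0.25, 0.4, 0.1, 0.2].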
def _SCREAMING_SNAKE_CASE ( a , a , a = 1 ) -> tuple[list, list, list]:
__A : int = []
__A : Optional[Any] = []
__A : Dict = []
for idx in range(len(a ) ):
__A : Dict = []
__A : Optional[Any] = img_list[idx]
path_list.append(a )
__A : Union[str, Any] = anno_list[idx]
__A : Optional[Any] = cva.imread(a )
if flip_type == 1:
__A : Any = cva.flip(a , a )
for bbox in img_annos:
__A : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__A : Union[str, Any] = cva.flip(a , a )
for bbox in img_annos:
__A : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(a )
new_imgs_list.append(a )
return new_imgs_list, new_annos_lists, path_list
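# Illustrative note: for normalised coordinates a horizontal flip (flip_type=1)
# maps x_center -> 1 - x_center and a vertical flip (flip_type=0) maps
# y_center -> 1 - y_center; box width and height are unchanged.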
def _SCREAMING_SNAKE_CASE ( a = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
__A : List[Any] = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 77 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[int] = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a=False , a=True ) -> List[Any]:
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
__A : List[str] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__A : List[str] = cached_file(a , a , force_download=not use_cached_models )
__A : Tuple = config_class.from_json_file(a )
__A : Dict = True
__A : Optional[Any] = True
print(F"""Building TensorFlow model from configuration: {config}""" )
__A : str = model_class(a )
    # Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__A : Dict = cached_file(
a , a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__A : Optional[int] = load_pytorch_checkpoint_in_tfa_model(a , a )
if compare_with_pt_model:
__A : Tuple = tf_model(tf_model.dummy_inputs , training=a ) # build the network
__A : List[Any] = torch.load(a , map_location='cpu' )
__A : Optional[int] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a , config=a , state_dict=a )
with torch.no_grad():
__A : Any = pt_model(**pt_model.dummy_inputs )
__A : int = pto[0].numpy()
__A : List[Any] = tfo[0].numpy()
__A : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2e-2, F"""Error, model absolute difference is >2e-2: {diff}"""
    # Save the TensorFlow model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(a , save_format='h5' )
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=None , a=False , a=False , a=False , a=False , ) -> Optional[int]:
if args_model_type is None:
__A : Union[str, Any] = list(MODEL_CLASSES.keys() )
else:
__A : List[Any] = [args_model_type]
for j, model_type in enumerate(a , start=1 ):
print('=' * 1_00 )
print(F""" Converting model type {j}/{len(a )}: {model_type}""" )
print('=' * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
__A : Tuple = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__A : Union[str, Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__A : List[str] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a , a ) , start=1 ):
print('-' * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
__A : int = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(a )}: {model_shortcut_name} - model_type {model_type}""" )
print('-' * 1_00 )
if config_shortcut_name in aws_config_map:
__A : List[Any] = cached_file(a , a , force_download=not use_cached_models )
else:
__A : Any = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__A : Tuple = cached_file(a , a , force_download=not use_cached_models )
else:
__A : List[str] = model_shortcut_name
if os.path.isfile(a ):
__A : int = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=a , pytorch_checkpoint_path=a , config_file=a , tf_dump_path=os.path.join(a , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=a , )
if remove_cached_files:
os.remove(a )
os.remove(a )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
            '''use the configuration associated with the shortcut name on AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
UpperCAmelCase : Dict = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 704 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Union[str, Any] = parent
__A : List[str] = batch_size
__A : Optional[int] = seq_length
__A : List[Any] = is_training
__A : Optional[Any] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : Optional[Any] = use_labels
__A : List[str] = vocab_size
__A : Optional[int] = hidden_size
__A : List[Any] = num_hidden_layers
__A : int = num_attention_heads
__A : Dict = intermediate_size
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Dict = type_vocab_size
__A : Any = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : Union[str, Any] = num_choices
__A : str = scope
def UpperCAmelCase_ ( self ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : List[Any] = None
__A : List[Any] = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[str] = LlamaModel(config=_A )
model.to(_A )
model.eval()
__A : Any = model(_A , attention_mask=_A )
__A : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
        __A , __A , __A , __A , __A , __A , __A : Tuple = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : Dict = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = LlamaModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Optional[int] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : Tuple = 'single_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : int = 'multi_label_classification'
__A : int = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : List[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase_ ( self , _A ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = ids_tensor([1, 10] , config.vocab_size )
__A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = LlamaModel(_A )
original_model.to(_A )
original_model.eval()
__A : Dict = original_model(_A ).last_hidden_state
__A : int = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : int = {'type': scaling_type, 'factor': 1_0.0}
__A : str = LlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A : Dict = scaled_model(_A ).last_hidden_state
__A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''vit_msn'''
def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-0_6 , _A=224 , _A=16 , _A=3 , _A=True , **_A , ):
super().__init__(**_A )
__A : str = hidden_size
__A : Optional[Any] = num_hidden_layers
__A : Dict = num_attention_heads
__A : Optional[Any] = intermediate_size
__A : List[Any] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : str = initializer_range
__A : str = layer_norm_eps
__A : List[str] = image_size
__A : Any = patch_size
__A : Optional[int] = num_channels
__A : Union[str, Any] = qkv_bias
| 705 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase : str = HfApi()
UpperCAmelCase : List[str] = {}
# fmt: off
UpperCAmelCase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : Tuple = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCAmelCase : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase : Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 77 | 0 |
from __future__ import annotations
from cmath import sqrt
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
__A : List[str] = b * b - 4 * a * c
__A : Dict = (-b + sqrt(a )) / (2 * a)
__A : Optional[Any] = (-b - sqrt(a )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
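# Worked example (illustrative): for 5x^2 + 6x + 1 = 0 the discriminant is
# 6^2 - 4*5*1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.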
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    __A , __A : Optional[Any] = quadratic_roots(a=5 , b=6 , c=1 )
print(F"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 706 |
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : Union[str, Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : List[Any] = 0
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : Dict = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A : Optional[int] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__A : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : List[str] = 0
__A : Union[str, Any] = 0
return updated_arr
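# Worked example (illustrative): with size=2 and stride=2 on
# [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
# the output is the 2x2 matrix [[6, 8], [14, 16]] -- the max of each block.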
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : List[Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : Dict = 0
__A : str = 0
__A : Tuple = 0
__A : Optional[int] = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A : Any = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__A : Tuple = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : Dict = 0
__A : int = 0
return updated_arr
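# Illustrative note: both pooling functions produce an output of side length
# (n - size) // stride + 1 for an n x n input, e.g. n=4, size=2, stride=2 -> 2.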
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
UpperCAmelCase : int = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Union[str, Any]:
__A : int = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
__A : List[Any] = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(a ):
os.makedirs(a )
__A : Union[str, Any] = model.state_dict()
def to_tf_var_name(a ):
for patt, repl in iter(a ):
__A : Optional[Any] = name.replace(a , a )
return F"""bert/{name}"""
def create_tf_var(a , a , a ):
__A : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
__A : List[Any] = tf.get_variable(dtype=a , shape=tensor.shape , name=a , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__A : Optional[Any] = to_tf_var_name(a )
__A : List[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__A : Any = torch_tensor.T
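                # PyTorch linear layers store weights as (out_features, in_features),
                # while TF dense kernels are (in_features, out_features), hence the transpose.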
__A : int = create_tf_var(tensor=a , name=a , session=a )
tf.keras.backend.set_value(a , a )
__A : List[Any] = session.run(a )
print(F"""Successfully created {tf_name}: {np.allclose(a , a )}""" )
__A : Optional[int] = tf.train.Saver(tf.trainable_variables() )
saver.save(a , os.path.join(a , model_name.replace('-' , '_' ) + '.ckpt' ) )
def _SCREAMING_SNAKE_CASE ( a=None ) -> Union[str, Any]:
__A : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=a , required=a , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=a , default=a , required=a , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=a , required=a , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=a , required=a , help='Directory in which to save tensorflow model' )
__A : Tuple = parser.parse_args(a )
__A : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 707 |
from __future__ import annotations
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( a , a , a , a = 1_00 , ) -> float:
__A : Any = x_start
__A : List[str] = fnc(a )
__A : Optional[Any] = 0.0
for _ in range(a ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__A : Any = (x_end - x_start) / steps + xa
__A : List[str] = fnc(a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__A : Any = xa
__A : Dict = fxa
return area
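# Illustrative note: accuracy grows with the step count; for f(x) = x**2 on
# [0, 1], 100 steps give roughly 0.33335 against the exact value 1/3.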
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
UpperCAmelCase : Tuple = 10
while i <= 10_00_00:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[Any] = [1]
    __A , __A , __A : Union[str, Any] = 0, 0, 0
__A : Optional[int] = ugly_nums[ia] * 2
__A : Any = ugly_nums[ia] * 3
__A : str = ugly_nums[ia] * 5
for _ in range(1 , a ):
__A : Tuple = min(a , a , a )
ugly_nums.append(a )
if next_num == next_a:
ia += 1
__A : int = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__A : Union[str, Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__A : List[Any] = ugly_nums[ia] * 5
return ugly_nums[-1]
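# Illustrative note: the sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
# (7 and 11 are skipped), so e.g. the 10th ugly number is 12.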
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(2_00) = }""")
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Optional[Any] = rabinMiller.generate_large_prime(a )
print('Generating prime q...' )
__A : Union[str, Any] = rabinMiller.generate_large_prime(a )
__A : Tuple = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__A : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(a , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__A : Any = cryptoMath.find_mod_inverse(a , (p - 1) * (q - 1) )
__A : Dict = (n, e)
__A : Dict = (n, d)
return (public_key, private_key)
def _SCREAMING_SNAKE_CASE ( a , a ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A , __A : Optional[int] = generate_key(a )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 77 | 0 |
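The number theory behind the generated key pair, shown end to end as a toy sketch with textbook-sized primes (real keys need large random primes, as in the sample above):

from math import gcd

# Toy parameters for illustration only.
p, q = 61, 53
n = p * q                    # 3233
phi = (p - 1) * (q - 1)      # 3120
e = 17                       # public exponent, coprime with phi
assert gcd(e, phi) == 1
d = pow(e, -1, phi)          # modular inverse of e, Python 3.8+

message = 65
cipher = pow(message, e, n)  # encrypt with the public key (n, e)
plain = pow(cipher, d, n)    # decrypt with the private key (n, d)
assert plain == message
print(n, e, d, cipher)       # 3233 17 2753 2790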
def _SCREAMING_SNAKE_CASE ( a ) -> bool:
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__A : List[Any] = 4
__A : Any = (1 << p) - 1
for _ in range(p - 2 ):
__A : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 709 |
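A sketch with the same recurrence plus a small scan over candidate exponents; note the sample correctly reports 2**11 - 1 = 2047 = 23 * 89 as composite:

def lucas_lehmer(p: int) -> bool:
    """True iff the Mersenne number 2**p - 1 is prime (valid for odd prime p, and p == 2)."""
    if p == 2:
        return True
    m = (1 << p) - 1
    s = 4
    for _ in range(p - 2):
        s = (s * s - 2) % m  # the Lucas-Lehmer sequence modulo 2**p - 1
    return s == 0

print([p for p in (3, 5, 7, 11, 13, 17, 19) if lucas_lehmer(p)])  # [3, 5, 7, 13, 17, 19]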
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = ProphetNetTokenizer
UpperCamelCase : Tuple = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = 'UNwant\u00E9d,running'
__A : List[str] = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase_ ( self ):
__A : Tuple = self.tokenizer_class(self.vocab_file )
__A : List[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self ):
__A : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCAmelCase_ ( self ):
__A : List[str] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : List[Any] = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__A : Optional[int] = {}
for i, token in enumerate(_A ):
__A : Tuple = i
__A : Tuple = WordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__A : str = tokenizer(_A , padding=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
__A : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Any = tokenizer.encode('sequence builders' , add_special_tokens=_A )
__A : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_A )
__A : str = tokenizer.build_inputs_with_special_tokens(_A )
__A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 77 | 0 |
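The WordpieceTokenizer behaviour asserted above ('unwanted' -> ['un', '##want', '##ed'], one bad character poisoning the whole word into [UNK]) is greedy longest-match-first. A self-contained sketch:

def wordpiece(word, vocab, unk="[UNK]"):
    """Greedy longest-match-first subword split, matching the assertions above."""
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate  # continuation pieces carry the ## prefix
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # one unmatched span turns the whole word into [UNK]
        tokens.append(piece)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']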
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        # We need to order the inputs in the way they appear in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
| 710 |
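The shape bookkeeping in `generate_dummy_inputs` above, isolated as a minimal torch sketch; the sizes here are illustrative stand-ins for the config values:

import torch

batch, seqlen = 2, 7
num_heads, hidden, num_layers = 16, 1024, 4
past_len = seqlen + 2  # deliberately different from the new input length

shape = (batch, num_heads, past_len, hidden // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]

# The mask has to cover the cached past tokens plus the freshly fed tokens.
attention_mask = torch.cat(
    [torch.ones(batch, seqlen, dtype=torch.long), torch.ones(batch, past_len, dtype=torch.long)],
    dim=1,
)
print(past_key_values[0][0].shape, attention_mask.shape)  # (2, 16, 9, 64), (2, 16)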
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = BertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__A : Any = getattr(_A , normalizer_state.pop('type' ) )
__A : Union[str, Any] = do_lower_case
__A : Optional[int] = strip_accents
__A : List[Any] = tokenize_chinese_chars
__A : int = normalizer_class(**_A )
__A : Union[str, Any] = do_lower_case
def UpperCAmelCase_ ( self , _A , _A=None ):
__A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = [self.sep_token_id]
__A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 77 | 0 |
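The last three methods implement BERT's sentence-pair packing. Their effect, written out plainly (101/102 are BERT's conventional [CLS]/[SEP] ids):

CLS, SEP = 101, 102

def with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out

def token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)         # [CLS] ... [SEP] -> segment 0
    if ids_b is None:
        return first
    return first + [1] * (len(ids_b) + 1)  # ... [SEP] -> segment 1

print(with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]
print(token_type_ids([7, 8], [9]))       # [0, 0, 0, 0, 1, 1]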
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = None
UpperCamelCase : Optional[Any] = BloomTokenizerFast
UpperCamelCase : List[str] = BloomTokenizerFast
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = '''tokenizer_file'''
UpperCamelCase : List[Any] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Optional[int] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_A ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.get_rust_tokenizer()
__A : int = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
__A : Union[str, Any] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
__A : Optional[int] = tokenizer.batch_encode_plus(_A )['input_ids']
self.assertListEqual(_A , _A )
__A : Optional[Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase_ ( self , _A=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__A : List[str] = 'This is a simple input'
__A : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
__A : str = ('This is a simple input', 'This is a pair')
__A : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_A , max_length=_A )
tokenizer_r.encode_plus(_A , max_length=_A )
tokenizer_r.batch_encode_plus(_A , max_length=_A )
tokenizer_r.encode(_A , max_length=_A )
tokenizer_r.batch_encode_plus(_A , max_length=_A )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
__A : List[str] = None # Hotfixing padding = None
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='max_length' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='max_length' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='max_length' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='max_length' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='max_length' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='max_length' , )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_rust_tokenizer()
__A : Union[str, Any] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_A )
__A : Union[str, Any] = next(iter(_A ) )['premise'] # pick up one data
__A : int = list(sample_data.values() )
__A : Optional[Any] = list(map(tokenizer.encode , _A ) )
__A : List[Any] = [tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) for x in output_tokens]
self.assertListEqual(_A , _A )
def UpperCAmelCase_ ( self ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. This test of the parent class would fail since it
        # relies on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 711 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ):
debug_launcher(test_ops.main )
| 77 | 0 |
from __future__ import annotations
from math import gcd
def _SCREAMING_SNAKE_CASE ( a , a = 2 , a = 1 , a = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
    # to calculate; it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(a , a , a ) -> int:
return (pow(a , 2 ) + step) % modulus
for _ in range(a ):
# These track the position within the cycle detection logic.
__A : Dict = seed
__A : Optional[int] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__A : Tuple = rand_fn(a , a , a )
__A : Dict = rand_fn(a , a , a )
__A : Optional[Any] = rand_fn(a , a , a )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__A : Optional[int] = gcd(hare - tortoise , a )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
__A : int = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
UpperCAmelCase : Tuple = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
UpperCAmelCase : Tuple = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 712 |
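A compact sketch of a single Pollard's rho attempt, the same Floyd tortoise-and-hare structure with the obfuscated names restored:

from __future__ import annotations

from math import gcd

def pollard_rho_once(num: int, seed: int = 2, step: int = 1) -> int | None:
    """One attempt with f(x) = (x**2 + step) % num; returns None when the attempt fails."""
    if num % 2 == 0:
        return 2

    def f(x: int) -> int:
        return (x * x + step) % num

    tortoise = hare = seed
    while True:
        tortoise = f(tortoise)  # the tortoise moves one step
        hare = f(f(hare))       # the hare moves two steps
        divisor = gcd(hare - tortoise, num)
        if divisor == 1:
            continue            # not in a shared cycle yet
        return divisor if divisor != num else None  # num itself is a useless "factor"

print(pollard_rho_once(8051))  # 97 with this seed (8051 = 83 * 97)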
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
__A : int = 16
__A : Optional[int] = len(_A )
__A : int = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 77 | 0 |
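The nested-query test above only pins down the output shape, (batch_size * num_max_text_queries, seq_length). One plausible way to get there is padding each image's query list to the longest one before flattening; whether the real processor pads with empty strings is an assumption here, only the shape is asserted:

texts = [["cat", "nasa badge"], ["person"]]
max_queries = max(len(queries) for queries in texts)

flat = []
for queries in texts:
    flat.extend(queries + [""] * (max_queries - len(queries)))  # hypothetical padding value

print(len(flat), flat)  # 4 == batch_size * num_max_text_queries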
from __future__ import annotations
import time
UpperCAmelCase : Optional[int] = list[tuple[int, int]]
UpperCAmelCase : str = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _A:
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A ):
__A : Union[str, Any] = pos_x
__A : List[Any] = pos_y
__A : Tuple = (pos_y, pos_x)
__A : Union[str, Any] = goal_x
__A : Optional[Any] = goal_y
__A : Any = parent
class _A:
"""simple docstring"""
def __init__( self , _A , _A ):
__A : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , _A )
__A : Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , _A )
__A : List[Any] = [self.start]
__A : List[str] = False
def UpperCAmelCase_ ( self ):
while self.node_queue:
__A : Union[str, Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
__A : Union[str, Any] = True
return self.retrace_path(_A )
__A : Optional[int] = self.get_successors(_A )
for node in successors:
self.node_queue.append(_A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase_ ( self , _A ):
__A : Any = []
for action in delta:
__A : str = parent.pos_x + action[1]
__A : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_A , _A , self.target.pos_y , self.target.pos_x , _A ) )
return successors
def UpperCAmelCase_ ( self , _A ):
__A : Any = node
__A : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__A : str = current_node.parent
path.reverse()
return path
class _A:
"""simple docstring"""
def __init__( self , _A , _A ):
__A : Union[str, Any] = BreadthFirstSearch(_A , _A )
__A : Dict = BreadthFirstSearch(_A , _A )
__A : Optional[Any] = False
def UpperCAmelCase_ ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__A : str = self.fwd_bfs.node_queue.pop(0 )
__A : Optional[Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
__A : List[str] = True
return self.retrace_bidirectional_path(
_A , _A )
__A : Any = current_bwd_node
__A : Optional[int] = current_fwd_node
__A : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(_A ),
self.bwd_bfs: self.bwd_bfs.get_successors(_A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase_ ( self , _A , _A ):
__A : Optional[int] = self.fwd_bfs.retrace_path(_A )
__A : List[str] = self.bwd_bfs.retrace_path(_A )
bwd_path.pop()
bwd_path.reverse()
__A : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase : List[Any] = (0, 0)
UpperCAmelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase : Optional[Any] = time.time()
UpperCAmelCase : int = BreadthFirstSearch(init, goal)
UpperCAmelCase : int = bfs.search()
UpperCAmelCase : Union[str, Any] = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCAmelCase : Dict = time.time()
UpperCAmelCase : str = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase : Optional[int] = bd_bfs.search()
UpperCAmelCase : List[str] = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 713 |
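Note that `get_successors` above never checks whether a cell was already visited, so nodes can be re-enqueued many times; correctness survives but the queue balloons. A plain unidirectional BFS sketch over the same grid encoding, with a parent map doubling as the visited set:

from collections import deque

def bfs_path(grid, start, goal):
    """Unidirectional BFS on a 0/1 grid; coordinates are (y, x) as in the sample."""
    rows, cols = len(grid), len(grid[0])
    parent = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node == goal:
            path = []
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        y, x = node
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            nxt = (y + dy, x + dx)
            if 0 <= nxt[0] < rows and 0 <= nxt[1] < cols and grid[nxt[0]][nxt[1]] == 0 and nxt not in parent:
                parent[nxt] = node
                queue.append(nxt)
    return None

grid = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(bfs_path(grid, (0, 0), (2, 0)))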
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Tuple:
for attribute in key.split('.' ):
__A : Dict = getattr(a , a )
if weight_type is not None:
__A : Any = getattr(a , a ).shape
else:
__A : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__A : Union[str, Any] = value
elif weight_type == "weight_g":
__A : Dict = value
elif weight_type == "weight_v":
__A : Optional[int] = value
elif weight_type == "bias":
__A : int = value
elif weight_type == "running_mean":
__A : Union[str, Any] = value
elif weight_type == "running_var":
__A : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
__A : Any = value
elif weight_type == "inv_freq":
__A : Optional[Any] = value
else:
__A : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Union[str, Any]:
__A : Any = []
__A : Optional[int] = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__A : int = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__A : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : Optional[Any] = True
if "*" in mapped_key:
__A : str = name.split(a )[0].split('.' )[-2]
__A : int = mapped_key.replace('*' , a )
if "pos_bias_u" in name:
__A : Optional[int] = None
elif "pos_bias_v" in name:
__A : Dict = None
elif "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Dict = 'weight_v'
elif "bias" in name:
__A : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : int = 'weight'
elif "running_mean" in name:
__A : str = 'running_mean'
elif "inv_freq" in name:
__A : List[Any] = 'inv_freq'
elif "running_var" in name:
__A : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[Any] = 'num_batches_tracked'
else:
__A : List[str] = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Any:
__A : str = full_name.split('conv_layers.' )[-1]
__A : str = name.split('.' )
__A : Dict = int(items[0] )
__A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__A : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__A : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=None , a=True ) -> Any:
if config_path is not None:
__A : Tuple = WavaVecaConformerConfig.from_pretrained(a , hidden_act='swish' )
else:
__A : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__A : Dict = 'rotary'
if is_finetuned:
if dict_path:
__A : Dict = Dictionary.load(a )
            # important: change the bos & pad token ids, since the CTC symbol is <pad> and
            # not <s> as in fairseq
__A : int = target_dict.pad_index
__A : List[Any] = target_dict.bos_index
__A : Any = target_dict.eos_index
__A : Dict = len(target_dict.symbols )
__A : Optional[Any] = os.path.join(a , 'vocab.json' )
if not os.path.isdir(a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a ) )
return
os.makedirs(a , exist_ok=a )
__A : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__A : int = 0
__A : Optional[Any] = 1
with open(a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(a , a )
__A : Optional[Any] = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a , )
__A : Tuple = True if config.feat_extract_norm == 'layer' else False
__A : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__A : Optional[int] = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__A : List[Any] = WavaVecaConformerForCTC(a )
else:
__A : List[Any] = WavaVecaConformerForPreTraining(a )
if is_finetuned:
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__A : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__A : str = fairseq.tasks.setup_task(a )
__A , __A , __A : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__A : Tuple = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 77 | 0 |
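The `set_recursively` helper above boils down to a getattr walk along a dotted key plus a shape check before assignment. A sketch of just that mechanism:

import torch

def set_recursively(root, dotted_key: str, value: torch.Tensor) -> None:
    """Walk `a.b.c` with getattr and assign tensor data at the leaf, shape-checked."""
    *path, leaf = dotted_key.split(".")
    obj = root
    for attr in path:
        obj = getattr(obj, attr)
    target = getattr(obj, leaf)
    if target.shape != value.shape:
        raise ValueError(f"{dotted_key}: expected {target.shape}, got {value.shape}")
    target.data = value

model = torch.nn.Sequential(torch.nn.Linear(2, 2))
set_recursively(model, "0.weight", torch.eye(2))  # Sequential exposes children as "0", "1", ...
print(model[0].weight)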
def _SCREAMING_SNAKE_CASE ( a ) -> str:
return " ".join(
''.join(word[::-1] ) if len(a ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 714 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
| 77 | 0 |
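The two abstract hooks above define HuggingFace-style CLI subcommands: a static `register_subcommand` that wires up argparse, and a `run` that executes. A self-contained sketch of the pattern:

from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        ...

    @abstractmethod
    def run(self):
        ...

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo")
        parser.add_argument("text")
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)

parser = ArgumentParser()
subparsers = parser.add_subparsers()
EchoCommand.register_subcommand(subparsers)
args = parser.parse_args(["echo", "hello"])
args.factory(args).run()  # -> hello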
import sys
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : List[Any] = len(a )
__A : int = [[0 for x in range(a )] for x in range(a )]
__A : Any = [[0 for x in range(a )] for x in range(a )]
for chain_length in range(2 , a ):
for a in range(1 , n - chain_length + 1 ):
__A : Tuple = a + chain_length - 1
__A : Union[str, Any] = sys.maxsize
for c in range(a , a ):
__A : Optional[int] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__A : List[str] = cost
__A : Union[str, Any] = c
return matrix, sol
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> List[Any]:
if i == j:
print('A' + str(a ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(a , a , optimal_solution[i][j] )
print_optiomal_solution(a , optimal_solution[i][j] + 1 , a )
print(')' , end=' ' )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
__A : Tuple = [30, 35, 15, 5, 10, 20, 25]
__A : Any = len(a )
    # The matrices built from the array above have sizes
    # 30x35, 35x15, 15x5, 5x10, 10x20, 20x25
__A : Dict = matrix_chain_order(a )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(a , 1 , n - 1 )
if __name__ == "__main__":
main()
| 715 |
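The same dynamic program with readable names; the 15125-operation answer for dims [30, 35, 15, 5, 10, 20, 25] is the classic CLRS result:

import sys

def matrix_chain_order(dims):
    """m[i][j]: min scalar multiplications for A_i..A_j; s[i][j]: the optimal split point."""
    n = len(dims)
    m = [[0] * n for _ in range(n)]
    s = [[0] * n for _ in range(n)]
    for length in range(2, n):        # chain lengths
        for i in range(1, n - length + 1):
            j = i + length - 1
            m[i][j] = sys.maxsize
            for k in range(i, j):     # try every split A_i..A_k | A_{k+1}..A_j
                cost = m[i][k] + m[k + 1][j] + dims[i - 1] * dims[k] * dims[j]
                if cost < m[i][j]:
                    m[i][j], s[i][j] = cost, k
    return m, s

m, s = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
print(m[1][6])  # 15125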
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77 | 0 |
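`_LazyModule` defers submodule imports until an attribute is first touched. The same effect can be sketched with PEP 562's module-level `__getattr__`; this stand-in resolves from the stdlib for demonstration, whereas the real class resolves relative submodules and raises the optional-dependency errors handled above:

import importlib
import sys

_import_structure = {"math": ["sqrt", "pi"]}  # submodule -> names it exports

def __getattr__(name):  # PEP 562: called only for names not already in the module
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module), name)
    raise AttributeError(name)

print(sys.modules[__name__].sqrt(9.0))  # 3.0 -- resolved lazily on first attribute access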
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = '''vivit'''
def __init__( self , _A=224 , _A=32 , _A=[2, 16, 16] , _A=3 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu_fast" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-0_6 , _A=True , **_A , ):
__A : List[str] = hidden_size
__A : Tuple = num_hidden_layers
__A : Any = num_attention_heads
__A : str = intermediate_size
__A : Union[str, Any] = hidden_act
__A : int = hidden_dropout_prob
__A : Dict = attention_probs_dropout_prob
__A : Union[str, Any] = initializer_range
__A : List[Any] = layer_norm_eps
__A : int = image_size
__A : Optional[int] = num_frames
__A : Optional[Any] = tubelet_size
__A : Union[str, Any] = num_channels
__A : Union[str, Any] = qkv_bias
super().__init__(**_A )
| 716 |
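Patch-count arithmetic implied by the defaults above; that the embedding layer tiles videos this way is an assumption about ViViT's tubelet embedding, not something stated in this config file:

num_frames, image_size = 32, 224
t, h, w = 2, 16, 16  # tubelet_size: 2 frames x 16 x 16 pixels per tubelet
num_patches = (num_frames // t) * (image_size // h) * (image_size // w)
print(num_patches)  # 16 * 14 * 14 = 3136 tokens per video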
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
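
# Editor's note (hedged sketch): the slow test above as plain usage. Model id
# and call arguments are taken directly from the test; requires a CUDA GPU.
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained('openai/shap-e').to('cuda')
generator = torch.Generator(device='cuda').manual_seed(0)
images = pipe(
    'a shark',
    generator=generator,
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=64,
    output_type='np',
).images[0]  # shape (20, 64, 64, 3): 20 rendered views of the 3D asset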
| 77 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
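
# Editor's note (hedged sketch): a concrete command in the spirit of this
# abstract base (upstream: transformers' BaseTransformersCLICommand, where the
# two abstract methods are register_subcommand and run). Self-contained, with
# illustrative names (BaseCommand, EchoCommand, "echo").
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser('echo', help='print a message')
        parser.add_argument('message')
        parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)


if __name__ == "__main__":
    root = ArgumentParser('demo')
    EchoCommand.register_subcommand(root.add_subparsers())
    args = root.parse_args()
    args.func(args).run()  # e.g. `python demo.py echo hello` prints "hello"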
| 717 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # the seven Strassen products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            F"""Matrix A: {matrix1}\n"""
            F"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    # NOTE: quirk preserved from the source -- two already-square matrices are
    # returned as-is instead of being multiplied
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
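
# Editor's note (hedged sketch): sanity-checking strassen() against a naive
# triple loop. Two quirks of the implementation above matter here: it mutates
# its inputs while zero-padding, and it returns square inputs unchanged --
# hence the deep copies and the non-square test case.
import copy


def naive_multiply(m1: list, m2: list) -> list:
    out = [[0] * len(m2[0]) for _ in range(len(m1))]
    for i in range(len(m1)):
        for j in range(len(m2[0])):
            for k in range(len(m2)):
                out[i][j] += m1[i][k] * m2[k][j]
    return out


m1 = [[2, 3, 4, 5], [6, 4, 3, 1]]  # 2 x 4
m2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]  # 4 x 4
assert strassen(copy.deepcopy(m1), copy.deepcopy(m2)) == naive_multiply(m1, m2)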
| 77 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class _A:
"""simple docstring"""
def __init__( self ):
__A : Optional[int] = False
def UpperCAmelCase_ ( self , _A , _A , _A , _A ):
if not self.initialized:
__A : List[str] = RagRetriever(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__A : List[str] = True
def UpperCAmelCase_ ( self ):
self.retriever.index.init_index()
def UpperCAmelCase_ ( self , _A , _A ):
__A : str = self.retriever._main_retrieve(_A , _A )
return doc_ids, retrieved_doc_embeds
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A=None ):
if index is not None and index.is_initialized() and len(_A ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__A : int = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_A , _A , _A , _A )
for worker in self.retrieval_workers
] )
def UpperCAmelCase_ ( self ):
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def UpperCAmelCase_ ( self , _A , _A ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__A : List[str] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__A : List[str] = ray.get(random_worker.retrieve.remote(_A , _A ) )
else:
__A : Optional[int] = self._main_retrieve(_A , _A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , _A=None , **_A ):
return super(_A , cls ).get_tokenizers(_A , _A , **_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , _A , _A=None , **_A ):
__A : List[str] = kwargs.pop('config' , _A ) or RagConfig.from_pretrained(_A , **_A )
__A : str = RagTokenizer.from_pretrained(_A , config=_A )
__A : Any = rag_tokenizer.question_encoder
__A : List[Any] = rag_tokenizer.generator
if indexed_dataset is not None:
__A : str = 'custom'
__A : Dict = CustomHFIndex(config.retrieval_vector_size , _A )
else:
__A : Any = cls._build_index(_A )
return cls(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , retrieval_workers=_A , index=_A , )
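
# Editor's note (hedged sketch): how the retrieval workers consumed above are
# typically created. In the upstream transformers RAG-with-Ray example, the
# first class above is named RayRetriever; ray.remote() wraps such a class
# into an actor whose .remote() constructor spawns a separate worker process.
# `RayRetriever` and the worker count below are illustrative stand-ins.
import ray

ray.init()
RemoteRetriever = ray.remote(RayRetriever)  # RayRetriever: the first class above
retrieval_workers = [RemoteRetriever.remote() for _ in range(4)]
# Pass these handles as `retrieval_workers` to the distributed retriever's
# constructor / from_pretrained shown above.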
| 718 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8),
        'Stack'.center(print_width),
        'Postfix'.center(print_width),
        sep=' | ',
    )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(": an open parenthesis has no entry in `priority`
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format
    return ''.join(post_fix)  # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return infix_2_postfix(''.join(infix))[::-1]  # convert, then reverse the Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
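
# Editor's note (hedged sketch): expected round-trip behaviour on a small
# expression. Both calls also print their conversion trace tables; only the
# return values are asserted here.
assert infix_2_postfix("a+b*(c^d-e)") == "abcd^e-*+"
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"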
| 77 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = '''Salesforce/blip-image-captioning-base'''
UpperCamelCase : str = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCamelCase : int = '''image_captioner'''
UpperCamelCase : Union[str, Any] = AutoModelForVisionaSeq
UpperCamelCase : Dict = ['''image''']
UpperCamelCase : Union[str, Any] = ['''text''']
def __init__( self , *_A , **_A ):
requires_backends(self , ['vision'] )
super().__init__(*_A , **_A )
def UpperCAmelCase_ ( self , _A ):
return self.pre_processor(images=_A , return_tensors='pt' )
def UpperCAmelCase_ ( self , _A ):
return self.model.generate(**_A )
def UpperCAmelCase_ ( self , _A ):
return self.pre_processor.batch_decode(_A , skip_special_tokens=_A )[0].strip()
| 719 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''mask2former'''
UpperCamelCase : Any = ['''swin''']
UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , _A = None , _A = 256 , _A = 256 , _A = 256 , _A = 1024 , _A = "relu" , _A = 6 , _A = 10 , _A = 8 , _A = 0.0 , _A = 2048 , _A = False , _A = False , _A = 4 , _A = 255 , _A = 100 , _A = 0.1 , _A = 2.0 , _A = 5.0 , _A = 5.0 , _A = 12544 , _A = 3.0 , _A = 0.7_5 , _A = 0.0_2 , _A = 1.0 , _A = True , _A = [4, 8, 16, 32] , _A = None , **_A , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__A : Optional[int] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Dict = backbone_config.pop('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                F"""Supported model types: {','.join(self.backbones_supported )}""" )
__A : Optional[int] = backbone_config
__A : Optional[Any] = feature_size
__A : Any = mask_feature_size
__A : Optional[Any] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : Optional[Any] = activation_function
__A : List[Any] = encoder_layers
__A : Union[str, Any] = decoder_layers
__A : Dict = num_attention_heads
__A : Tuple = dropout
__A : Dict = dim_feedforward
__A : Tuple = pre_norm
__A : Dict = enforce_input_projection
__A : Optional[int] = common_stride
__A : Optional[Any] = ignore_value
__A : str = num_queries
__A : List[Any] = no_object_weight
__A : List[str] = class_weight
__A : List[Any] = mask_weight
__A : List[Any] = dice_weight
__A : Tuple = train_num_points
__A : Optional[Any] = oversample_ratio
__A : Union[str, Any] = importance_sample_ratio
__A : Union[str, Any] = init_std
__A : int = init_xavier_std
__A : Union[str, Any] = use_auxiliary_loss
__A : Union[str, Any] = feature_strides
__A : List[Any] = output_auxiliary_logits
__A : Optional[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
return cls(
backbone_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = copy.deepcopy(self.__dict__ )
__A : List[Any] = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
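
# Editor's note (hedged sketch): typical instantiation via the upstream classes
# this snippet mirrors (transformers.Mask2FormerConfig / SwinConfig), including
# the from_backbone_config classmethod defined above.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(out_features=['stage1', 'stage2', 'stage3', 'stage4'])
config = Mask2FormerConfig.from_backbone_config(backbone)
assert config.backbone_config.model_type == 'swin'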
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_maskformer'''] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure['''modeling_maskformer_swin'''] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 720 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''conditional_detr'''
UpperCamelCase : int = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _A=True , _A=None , _A=3 , _A=300 , _A=6 , _A=2048 , _A=8 , _A=6 , _A=2048 , _A=8 , _A=0.0 , _A=0.0 , _A=True , _A="relu" , _A=256 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1.0 , _A=False , _A="sine" , _A="resnet50" , _A=True , _A=False , _A=2 , _A=5 , _A=2 , _A=1 , _A=1 , _A=2 , _A=5 , _A=2 , _A=0.2_5 , **_A , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__A : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__A : Tuple = backbone_config.get('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[Any] = config_class.from_dict(_A )
__A : Tuple = use_timm_backbone
__A : List[str] = backbone_config
__A : Dict = num_channels
__A : int = num_queries
__A : int = d_model
__A : str = encoder_ffn_dim
__A : List[str] = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Union[str, Any] = decoder_ffn_dim
__A : List[Any] = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : Any = dropout
__A : Any = attention_dropout
__A : int = activation_dropout
__A : Optional[int] = activation_function
__A : Union[str, Any] = init_std
__A : Union[str, Any] = init_xavier_std
__A : Optional[Any] = encoder_layerdrop
__A : int = decoder_layerdrop
__A : List[str] = encoder_layers
__A : str = auxiliary_loss
__A : Union[str, Any] = position_embedding_type
__A : Optional[int] = backbone
__A : List[str] = use_pretrained_backbone
__A : List[Any] = dilation
# Hungarian matcher
__A : List[str] = class_cost
__A : Optional[int] = bbox_cost
__A : Dict = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Union[str, Any] = dice_loss_coefficient
__A : List[Any] = cls_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
__A : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Dict = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-5
@property
def UpperCAmelCase_ ( self ):
return 12
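
# Editor's note (hedged sketch): the two classes above via their upstream
# counterparts (ConditionalDetrConfig and ConditionalDetrOnnxConfig).
from transformers import ConditionalDetrConfig
from transformers.models.conditional_detr.configuration_conditional_detr import (
    ConditionalDetrOnnxConfig,
)

config = ConditionalDetrConfig()  # defaults mirror the signature above
onnx_config = ConditionalDetrOnnxConfig(config)
print(onnx_config.inputs)  # pixel_values / pixel_mask with dynamic axes
print(onnx_config.atol_for_validation)  # 1e-5, per the property above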
| 77 | 0 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 721 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _A( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
__A : List[str] = nn.Linear(3 , 4 )
__A : Optional[Any] = nn.BatchNormad(4 )
__A : List[Any] = nn.Linear(4 , 5 )
def UpperCAmelCase_ ( self , _A ):
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , model.state_dict() )
__A : str = os.path.join(_A , 'index.json' )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__A : Optional[int] = os.path.join(_A , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase_ ( self ):
__A : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__A : Tuple = torch.randn(2 , 3 , dtype=_A )
with TemporaryDirectory() as tmp_dir:
__A : int = offload_weight(_A , 'weight' , _A , {} )
__A : Union[str, Any] = os.path.join(_A , 'weight.dat' )
self.assertTrue(os.path.isfile(_A ) )
self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A ).split('.' )[1]}} )
__A : List[str] = load_offloaded_weight(_A , index['weight'] )
self.assertTrue(torch.equal(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : int = ModelForTest()
__A : Union[str, Any] = model.state_dict()
__A : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
__A : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
__A : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
__A : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : Optional[int] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
# Duplicates are removed
__A : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
def UpperCAmelCase_ ( self ):
__A : Dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
__A : str = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2} )
__A : Optional[Any] = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
__A : Any = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2} )
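
# Editor's note (hedged sketch): the utilities exercised by the tests above,
# in plain form outside unittest.
import tempfile

import torch
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {'linear.weight': torch.randn(4, 3), 'linear.bias': torch.randn(4)}
with tempfile.TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)  # writes index.json plus one .dat per tensor
    weights = OffloadedWeightsLoader(save_folder=tmp_dir)
    assert torch.equal(weights['linear.weight'], state_dict['linear.weight'])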
| 77 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = Dict[str, Any]
UpperCAmelCase : int = List[Prediction]
@add_end_docstrings(snake_case__ )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
super().__init__(*_A , **_A )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCAmelCase_ ( self , **_A ):
__A : Tuple = {}
if "threshold" in kwargs:
__A : List[Any] = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *_A , **_A ):
return super().__call__(*_A , **_A )
def UpperCAmelCase_ ( self , _A ):
__A : List[str] = load_image(_A )
__A : Any = torch.IntTensor([[image.height, image.width]] )
__A : Optional[int] = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
__A : str = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
__A : Any = target_size
return inputs
def UpperCAmelCase_ ( self , _A ):
__A : Any = model_inputs.pop('target_size' )
__A : Tuple = self.model(**_A )
__A : Optional[Any] = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
__A : List[str] = model_inputs['bbox']
return model_outputs
def UpperCAmelCase_ ( self , _A , _A=0.9 ):
__A : str = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__A : List[Any] = target_size[0].tolist()
def unnormalize(_A ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
__A : Union[str, Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__A : List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__A : Optional[Any] = [unnormalize(_A ) for bbox in model_outputs['bbox'].squeeze(0 )]
__A : Union[str, Any] = ['score', 'label', 'box']
__A : str = [dict(zip(_A , _A ) ) for vals in zip(scores.tolist() , _A , _A ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__A : List[Any] = self.image_processor.post_process_object_detection(_A , _A , _A )
__A : Tuple = raw_annotations[0]
__A : int = raw_annotation['scores']
__A : Optional[int] = raw_annotation['labels']
__A : Union[str, Any] = raw_annotation['boxes']
__A : Optional[int] = scores.tolist()
__A : str = [self.model.config.idalabel[label.item()] for label in labels]
__A : str = [self._get_bounding_box(_A ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__A : Tuple = ['score', 'label', 'box']
__A : str = [
dict(zip(_A , _A ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def UpperCAmelCase_ ( self , _A ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
__A : Union[str, Any] = box.int().tolist()
__A : Union[str, Any] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
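
# Editor's note (hedged sketch): end-user view of the pipeline defined above.
from transformers import pipeline

detector = pipeline('object-detection', model='facebook/detr-resnet-50')
predictions = detector(
    'http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9
)
# Each prediction: {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
print(predictions[0])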
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
__A : Any = data
def __iter__( self ):
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE ( a=True ) -> Any:
__A : List[Any] = Accelerator(even_batches=a )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a , a = False ) -> str:
if iterable:
__A : int = DummyIterableDataset(torch.as_tensor(range(a ) ) )
else:
__A : Optional[Any] = TensorDataset(torch.as_tensor(range(a ) ) )
__A : Optional[Any] = DataLoader(a , batch_size=a )
__A : Optional[int] = accelerator.prepare(a )
return dl
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a , ) -> Union[str, Any]:
__A : Optional[int] = create_dataloader(accelerator=a , dataset_size=a , batch_size=a )
__A : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : str = create_accelerator(even_batches=a )
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : Optional[Any] = create_accelerator(even_batches=a )
__A : str = torch.nn.Linear(1 , 1 )
__A : Optional[int] = accelerator.prepare(a )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a ):
__A : Dict = ddp_model(batch[0].float() )
__A : List[str] = output.sum()
loss.backward()
batch_idxs.append(a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : int = True
__A : Union[str, Any] = False
__A : Optional[int] = create_accelerator(even_batches=a )
__A : int = torch.nn.Linear(1 , 1 )
__A : List[Any] = accelerator.prepare(a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : List[str] = train_dl.batch_sampler.even_batches
__A : Dict = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Any = True
__A : List[Any] = False
__A : Tuple = create_accelerator(even_batches=a )
__A : List[str] = torch.nn.Linear(1 , 1 )
__A : Optional[Any] = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : Tuple = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Any = create_accelerator()
__A : Union[str, Any] = torch.nn.Linear(1 , 1 )
__A : str = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
__A : int = accelerator.state.distributed_type
__A : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a )
__A : str = original_state
if __name__ == "__main__":
main()
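
# Editor's note: the assertions above require exactly two processes, so this
# script is meant to be run under the Accelerate launcher, e.g.:
#   accelerate launch --num_processes 2 this_script.py
# (script name illustrative).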
| 77 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
__A : int = 16
__A : Optional[int] = len(_A )
__A : int = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
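
# Editor's note (hedged sketch): the processor exercised above in end-user
# form; checkpoint id and text queries follow the integration tests.
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
inputs = processor(text=[['cat', 'nasa badge'], ['person']])
print(inputs['input_ids'].shape)  # (batch * max queries, 16) = (4, 16), per the tests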
| 701 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
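
# Editor's note (hedged sketch): generating dummy inputs with the ONNX config
# above, via the upstream classes this snippet mirrors.
from transformers import AutoConfig, AutoTokenizer, TensorType
from transformers.models.codegen.configuration_codegen import CodeGenOnnxConfig

model_id = 'Salesforce/codegen-350M-mono'
onnx_config = CodeGenOnnxConfig(AutoConfig.from_pretrained(model_id))
tokenizer = AutoTokenizer.from_pretrained(model_id)
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(list(dummy))  # ["input_ids", "attention_mask"]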
| 77 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase : int = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
UpperCAmelCase : str = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : str = ['''input_ids''', '''attention_mask''']
def __init__( self , _A , _A=False , _A=False , _A=False , _A=None , _A=None , _A=None , _A=None , _A = None , **_A , ):
__A : Any = {} if sp_model_kwargs is None else sp_model_kwargs
__A : str = kwargs.get('name_or_path' )
if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' if you are testing the model, this can safely be ignored' )
__A : Any = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A : int = '<|endoftext|>' if eos_token is None else eos_token
__A : Any = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A : Optional[int] = unk_token if pad_token is None else pad_token
__A : List[Any] = eos_token if bos_token is None else bos_token
else:
__A : List[str] = '<pad>' if pad_token is None else pad_token
__A : Optional[int] = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__A : Union[str, Any] = do_lower_case
__A : int = remove_space
__A : Any = keep_accents
__A : str = vocab_file
__A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# Used for whitespace normalization in input texts
        # fmt: off
__A : str = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __A : Union[str, Any] = re.compile(
            F"""[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ):
__A : List[str] = self.__dict__.copy()
__A : Tuple = None
return state
def __setstate__( self , _A ):
__A : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A : List[Any] = {}
__A : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase_ ( self ):
return len(self.sp_model )
def UpperCAmelCase_ ( self , _A ):
__A : Optional[Any] = self.non_printing_characters_re.sub('' , _A )
# Normalize whitespaces
__A : str = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__A : Optional[Any] = unicodedata.normalize('NFC' , _A )
return text
def UpperCAmelCase_ ( self , _A , **_A ):
__A : List[str] = self.preprocess_text(_A )
return self.sp_model.encode(_A , out_type=_A )
def UpperCAmelCase_ ( self , _A ):
return self.sp_model.PieceToId(_A )
def UpperCAmelCase_ ( self , _A ):
return self.sp_model.IdToPiece(_A )
@staticmethod
def UpperCAmelCase_ ( _A ):
return out_string
def UpperCAmelCase_ ( self , _A ):
__A : int = []
__A : Optional[int] = ''
__A : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
__A : Tuple = True
__A : int = []
else:
current_sub_tokens.append(_A )
__A : List[str] = False
out_string += self.sp_model.decode(_A )
return out_string
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , _A , _A = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : List[str] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , 'wb' ) as fi:
__A : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def UpperCAmelCase_ ( self , _A , _A = False ):
if isinstance(_A , _A ):
__A : Union[str, Any] = self.preprocess_text(_A )
__A : Optional[Any] = self.sp_model.encode(_A )
else:
__A : Any = [self.preprocess_text(_A ) for t in text]
__A : str = self.sp_model.encode(_A )
if return_tensors is True or return_tensors == "pt":
__A : Dict = torch.tensor(_A )
return token_ids
def UpperCAmelCase_ ( self , _A ):
return self.sp_model.decode(_A )
def UpperCAmelCase_ ( self , _A ):
__A : Tuple = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__A : Any = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(_A ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=_A )
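# Hedged usage sketch: this snippet mirrors GPTSw3Tokenizer from transformers;
# the checkpoint name below is an illustrative assumption, not taken from this file.
if __name__ == "__main__":
    from transformers import GPTSw3Tokenizer

    gpt_sw3_tok = GPTSw3Tokenizer.from_pretrained('AI-Sweden-Models/gpt-sw3-126m')
    ids = gpt_sw3_tok('Hej, hur mår du?')['input_ids']
    print(gpt_sw3_tok.decode(ids))  # round-trips modulo whitespace normalization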
| 702 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _A , )
super().__init__(*_A , **_A )
| 77 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Optional[int] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Any = LEDTokenizer
UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , _A=None , _A=None , _A=None , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , _A=True , **_A , ):
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
__A : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _A ) != add_prefix_space:
__A : Dict = getattr(_A , pre_tok_state.pop('type' ) )
__A : Tuple = add_prefix_space
__A : Optional[int] = pre_tok_class(**_A )
__A : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__A : Union[str, Any] = 'post_processor'
__A : List[str] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
__A : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__A : List[str] = tuple(state['sep'] )
if "cls" in state:
__A : List[Any] = tuple(state['cls'] )
__A : List[Any] = False
if state.get('add_prefix_space' , _A ) != add_prefix_space:
__A : Optional[int] = add_prefix_space
__A : List[Any] = True
if state.get('trim_offsets' , _A ) != trim_offsets:
__A : Union[str, Any] = trim_offsets
__A : List[Any] = True
if changes_to_apply:
__A : Tuple = getattr(_A , state.pop('type' ) )
__A : List[str] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCAmelCase_ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
__A : List[str] = value
def UpperCAmelCase_ ( self , *_A , **_A ):
__A : str = kwargs.get('is_split_into_words' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_A , **_A )
def UpperCAmelCase_ ( self , *_A , **_A ):
__A : List[Any] = kwargs.get('is_split_into_words' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_A , **_A )
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : List[str] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def UpperCAmelCase_ ( self , _A , _A=None ):
__A : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : Optional[Any] = [self.sep_token_id]
__A : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self , _A , _A = None , _A = PaddingStrategy.DO_NOT_PAD , _A = None , _A = None , ):
__A : Dict = super()._pad(
encoded_inputs=_A , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
# Load from model defaults
if return_attention_mask is None:
__A : Optional[int] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__A : List[str] = len(encoded_inputs['global_attention_mask'] ) != len(_A )
if needs_to_be_padded:
__A : Tuple = len(_A ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__A : Union[str, Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__A : Optional[int] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
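# Minimal sketch of the global_attention_mask padding handled by _pad above.
# Assumptions: this mirrors transformers' LEDTokenizerFast, and the
# 'allenai/led-base-16384' checkpoint is reachable.
if __name__ == "__main__":
    from transformers import LEDTokenizerFast

    led_tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
    enc = led_tok(['short text', 'a somewhat longer piece of text'])
    # Global attention on the first token of each sequence; after padding, the
    # padded slots hold -1 (local attention), per the branch above.
    enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids']]
    print(led_tok.pad(enc, padding=True)['global_attention_mask'])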
| 703 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
UpperCAmelCase : Dict = ''''''
UpperCAmelCase : Union[str, Any] = ''''''
UpperCAmelCase : Optional[int] = ''''''
UpperCAmelCase : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A , __A : List[Any] = get_dataset(a , a )
print('Processing...' )
__A , __A , __A : Optional[Any] = update_image_and_anno(a , a , a )
for index, image in enumerate(a ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__A : Optional[int] = random_chars(32 )
__A : Dict = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
__A : Dict = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , a , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Success {index+1}/{len(a )} with {file_name}""" )
__A : int = []
for anno in new_annos[index]:
__A : Any = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a )
with open(F"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> tuple[list, list]:
__A : int = []
__A : List[Any] = []
for label_file in glob.glob(os.path.join(a , '*.txt' ) ):
__A : List[str] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(a ) as in_file:
__A : Tuple = in_file.readlines()
__A : Dict = os.path.join(a , F"""{label_name}.jpg""" )
__A : Dict = []
for obj_list in obj_lists:
__A : int = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(a )
labels.append(a )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( a , a , a = 1 ) -> tuple[list, list, list]:
__A : int = []
__A : Optional[Any] = []
__A : Dict = []
for idx in range(len(a ) ):
__A : Dict = []
__A : Optional[Any] = img_list[idx]
path_list.append(a )
__A : Union[str, Any] = anno_list[idx]
__A : Optional[Any] = cva.imread(a )
if flip_type == 1:
__A : Any = cva.flip(a , a )
for bbox in img_annos:
__A : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__A : Union[str, Any] = cva.flip(a , a )
for bbox in img_annos:
__A : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(a )
new_imgs_list.append(a )
return new_imgs_list, new_annos_lists, path_list
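# Worked example of the bbox arithmetic above: YOLO boxes are normalised to
# [0, 1], so a flip just mirrors the relevant centre coordinate.
#   horizontal flip (flip_type == 1): x_center 0.25 -> 1 - 0.25 = 0.75, y unchanged
#   vertical flip   (flip_type == 0): y_center 0.10 -> 1 - 0.10 = 0.90, x unchanged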
def _SCREAMING_SNAKE_CASE ( a = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
__A : List[Any] = ascii_lowercase + digits
return "".join(random.choice(a ) for _ in range(a ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 77 | 0 |
import re
def _SCREAMING_SNAKE_CASE ( a ) -> list:
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : Optional[Any] = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> str:
try:
__A : Tuple = split_input(a )
if upper:
__A : Optional[int] = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__A : Dict = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _SCREAMING_SNAKE_CASE ( a ) -> str:
return to_simple_case(a )
def _SCREAMING_SNAKE_CASE ( a ) -> str:
try:
__A : str = to_simple_case(a )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
return to_complex_case(a , a , '_' )
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
return to_complex_case(a , a , '-' )
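# Illustrative expected behaviour of the converters above (shown schematically,
# since the helper names in this snippet are obfuscated):
#   simple/pascal case:       'one two three' -> 'OneTwoThree'
#   camel case:               'one two three' -> 'oneTwoThree'
#   snake case (upper=False): 'one two three' -> 'one_two_three'
#   kebab case (upper=True):  'one two three' -> 'ONE-TWO-THREE'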
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 704 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Union[str, Any] = parent
__A : List[str] = batch_size
__A : Optional[int] = seq_length
__A : List[Any] = is_training
__A : Optional[Any] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : Optional[Any] = use_labels
__A : List[str] = vocab_size
__A : Optional[int] = hidden_size
__A : List[Any] = num_hidden_layers
__A : int = num_attention_heads
__A : Dict = intermediate_size
__A : Any = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Dict = type_vocab_size
__A : Any = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : Union[str, Any] = num_choices
__A : str = scope
def UpperCAmelCase_ ( self ):
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[Any] = None
if self.use_input_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__A : Dict = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : List[Any] = None
__A : List[Any] = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : List[str] = LlamaModel(config=_A )
model.to(_A )
model.eval()
__A : Any = model(_A , attention_mask=_A )
__A : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the next tokens to input_ids and the attention mask
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : Tuple = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : Dict = False
def UpperCAmelCase_ ( self ):
__A : List[Any] = LlamaModelTester(self )
__A : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : Optional[int] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : Tuple = 'single_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : int = 'multi_label_classification'
__A : int = input_dict['input_ids']
__A : List[str] = input_ids.ne(1 ).to(_A )
__A : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : List[Any] = LlamaForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCAmelCase_ ( self , _A ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = ids_tensor([1, 10] , config.vocab_size )
__A : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = LlamaModel(_A )
original_model.to(_A )
original_model.eval()
__A : Dict = original_model(_A ).last_hidden_state
__A : int = original_model(_A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : int = {'type': scaling_type, 'factor': 1_0.0}
__A : str = LlamaModel(_A )
scaled_model.to(_A )
scaled_model.eval()
__A : Dict = scaled_model(_A ).last_hidden_state
__A : str = scaled_model(_A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Model is currently gated' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
| 77 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase : List[str] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a ) -> List[List[ImageInput]]:
if isinstance(a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(a , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(a ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
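# Quick sketch of the batching rule above: a single frame becomes [[frame]],
# a flat list of frames becomes [frames], and a list of videos passes through.
# (Illustrative; a tiny numpy array stands in for a real frame.)
#   frame = np.zeros((4, 4, 3), dtype=np.uint8)
#   frame              -> [[frame]]
#   [frame, frame]     -> [[frame, frame]]
#   [[frame], [frame]] -> unchanged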
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , **_A , ):
super().__init__(**_A )
__A : Optional[Any] = size if size is not None else {'shortest_edge': 224}
__A : Dict = get_size_dict(_A , default_to_square=_A )
__A : Dict = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : Any = get_size_dict(_A , param_name='crop_size' )
__A : Union[str, Any] = do_resize
__A : List[Any] = size
__A : List[str] = do_center_crop
__A : List[Any] = crop_size
__A : Any = resample
__A : Tuple = do_rescale
__A : Tuple = rescale_factor
__A : Optional[int] = do_normalize
__A : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
__A : Dict = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
__A : Tuple = get_resize_output_image_size(_A , size['shortest_edge'] , default_to_square=_A )
elif "height" in size and "width" in size:
__A : List[Any] = (size['height'], size['width'])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A = None , **_A , ):
__A : Any = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A = None , **_A , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A , _A , _A = None , **_A , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__A : Any = to_numpy_array(_A )
if do_resize:
__A : Union[str, Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
__A : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
__A : str = self.rescale(image=_A , scale=_A )
if do_normalize:
__A : Optional[Any] = self.normalize(image=_A , mean=_A , std=_A )
__A : int = to_channel_dimension_format(_A , _A )
return image
def UpperCAmelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
__A : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__A : List[str] = resample if resample is not None else self.resample
__A : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : Any = do_rescale if do_rescale is not None else self.do_rescale
__A : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : List[Any] = image_mean if image_mean is not None else self.image_mean
__A : Optional[int] = image_std if image_std is not None else self.image_std
__A : List[str] = size if size is not None else self.size
__A : Any = get_size_dict(_A , default_to_square=_A )
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : List[Any] = get_size_dict(_A , param_name='crop_size' )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__A : int = make_batched(_A )
__A : str = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
__A : Optional[Any] = {'pixel_values': videos}
return BatchFeature(data=_A , tensor_type=_A )
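# Hedged usage sketch. Assumption: this class mirrors transformers'
# VideoMAEImageProcessor; the frame count and shapes below are illustrative.
if __name__ == "__main__":
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor()
    video = [np.random.randint(0, 256, (320, 480, 3), dtype=np.uint8) for _ in range(8)]
    out = processor(video, return_tensors='np')
    print(out['pixel_values'].shape)  # expected (1, 8, 3, 224, 224) with the defaults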
| 705 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCAmelCase : str = HfApi()
UpperCAmelCase : List[str] = {}
# fmt: off
UpperCAmelCase : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : Tuple = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
UpperCAmelCase : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
UpperCAmelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCAmelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCAmelCase : Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 77 | 0 |
import requests
UpperCAmelCase : Dict = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def _SCREAMING_SNAKE_CASE ( a ) -> None:
# fetching a list of articles in json format
__A : Any = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 706 |
import numpy as np
from PIL import Image
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : Union[str, Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : List[Any] = 0
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : Dict = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__A : Optional[int] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__A : Tuple = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : List[str] = 0
__A : Union[str, Any] = 0
return updated_arr
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> np.ndarray:
__A : List[Any] = np.array(a )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__A : Dict = 0
__A : str = 0
__A : Tuple = 0
__A : Optional[int] = 0
# compute the shape of the output matrix
__A : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__A : Any = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__A : Tuple = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__A : Dict = 0
__A : int = 0
return updated_arr
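# Worked example of both pooling passes above on a 4x4 input with size=2,
# stride=2 (note avgpooling truncates via int(), so 3.5 -> 3):
#   [[ 1,  2,  3,  4],        max pool        avg pool
#    [ 5,  6,  7,  8],   ->   [[ 6,  8],      [[ 3,  5],
#    [ 9, 10, 11, 12],         [14, 16]]       [11, 13]]
#    [13, 14, 15, 16]]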
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
UpperCAmelCase : int = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Dict = '''realm'''
def __init__( self , _A=30522 , _A=768 , _A=128 , _A=12 , _A=12 , _A=8 , _A=3072 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.0_2 , _A=1e-1_2 , _A=256 , _A=10 , _A=1e-3 , _A=5 , _A=320 , _A=13353718 , _A=5000 , _A=1 , _A=0 , _A=2 , **_A , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
# Common config
__A : Union[str, Any] = vocab_size
__A : Optional[int] = max_position_embeddings
__A : Dict = hidden_size
__A : Tuple = retriever_proj_size
__A : List[Any] = num_hidden_layers
__A : Tuple = num_attention_heads
__A : Dict = num_candidates
__A : Dict = intermediate_size
__A : int = hidden_act
__A : List[str] = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : Union[str, Any] = initializer_range
__A : Any = type_vocab_size
__A : Dict = layer_norm_eps
# Reader config
__A : int = span_hidden_size
__A : Union[str, Any] = max_span_width
__A : List[Any] = reader_layer_norm_eps
__A : str = reader_beam_size
__A : Any = reader_seq_len
# Retrieval config
__A : Optional[Any] = num_block_records
__A : int = searcher_beam_size
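# Hedged usage sketch. Assumption: this mirrors transformers' RealmConfig, and
# num_candidates / retriever_proj_size are the fields set in __init__ above.
if __name__ == "__main__":
    from transformers import RealmConfig

    realm_config = RealmConfig(num_candidates=4, retriever_proj_size=64)
    print(realm_config.num_candidates, realm_config.retriever_proj_size)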
| 707 |
from __future__ import annotations
from collections.abc import Callable
def _SCREAMING_SNAKE_CASE ( a , a , a , a = 1_00 , ) -> float:
__A : Any = x_start
__A : List[str] = fnc(a )
__A : Optional[Any] = 0.0
for _ in range(a ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__A : Any = (x_end - x_start) / steps + xa
__A : List[str] = fnc(a )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__A : Any = xa
__A : Dict = fxa
return area
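# Worked example of the loop above for f(x) = x**2 on [0, 1] with steps=2:
#   x-grid 0 -> 0.5 -> 1, area = (0 + 0.25)/2 * 0.5 + (0.25 + 1)/2 * 0.5 = 0.375
# The exact integral is 1/3, and the estimate converges as steps grows.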
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> int:
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
UpperCAmelCase : Tuple = 10
while i <= 10_00_00:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase : Optional[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 10_24 )
print('Key files generation successful.' )
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Optional[Any] = rabinMiller.generate_large_prime(a )
print('Generating prime q...' )
__A : Union[str, Any] = rabinMiller.generate_large_prime(a )
__A : Tuple = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
__A : Dict = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(a , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
__A : Any = cryptoMath.find_mod_inverse(a , (p - 1) * (q - 1) )
__A : Dict = (n, e)
__A : Dict = (n, d)
return (public_key, private_key)
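# Tiny self-contained round trip for the textbook-RSA keys built above
# (illustrative only; real deployments need randomized padding such as OAEP):
def _rsa_round_trip_example() -> None:
    # Toy parameters: p=61, q=53, so n=3233, phi=3120, e=17, d=2753.
    n, e, d = 3233, 17, 2753
    message = 65
    ciphertext = pow(message, e, n)  # 2790
    assert pow(ciphertext, d, n) == message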
def _SCREAMING_SNAKE_CASE ( a , a ) -> None:
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A , __A : Optional[int] = generate_key(a )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , 'w' ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : Any = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _SCREAMING_SNAKE_CASE ( a ) -> dict[str, str]:
__A : Optional[Any] = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__A : List[Any] = remove_duplicates(key.upper() )
__A : Optional[int] = len(a )
# First fill cipher with key characters
__A : Optional[int] = {alphabet[i]: char for i, char in enumerate(a )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(a ) , 26 ):
__A : Optional[int] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__A : Tuple = alphabet[i - offset]
__A : Tuple = char
return cipher_alphabet
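# Worked example of the keyed alphabet built above, with keyword 'ZEBRA':
#   A->Z, B->E, C->B, D->R, E->A, then the map continues through the unused
#   letters, skipping any already consumed by the key: F->C, G->D, H->F, ...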
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
return "".join(cipher_map.get(a , a ) for ch in message.upper() )
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : int = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(a , a ) for ch in message.upper() )
def _SCREAMING_SNAKE_CASE ( ) -> None:
__A : List[Any] = input('Enter message to encode or decode: ' ).strip()
__A : Optional[Any] = input('Enter keyword: ' ).strip()
__A : Union[str, Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
__A : Union[str, Any] = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
__A : Any = create_cipher_map(a )
print(func(a , a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 709 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = ProphetNetTokenizer
UpperCamelCase : Tuple = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , _A ):
__A : List[Any] = 'UNwant\u00E9d,running'
__A : List[str] = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase_ ( self ):
__A : Tuple = self.tokenizer_class(self.vocab_file )
__A : List[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase_ ( self ):
__A : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCAmelCase_ ( self ):
__A : List[str] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : Dict = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCAmelCase_ ( self ):
__A : List[Any] = BasicTokenizer(do_lower_case=_A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__A : Optional[int] = {}
for i, token in enumerate(_A ):
__A : Tuple = i
__A : Tuple = WordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def UpperCAmelCase_ ( self ):
__A : int = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__A : str = tokenizer(_A , padding=_A , return_tensors='pt' )
self.assertIsInstance(_A , _A )
__A : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCAmelCase_ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__A : Any = tokenizer.encode('sequence builders' , add_special_tokens=_A )
__A : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_A )
__A : str = tokenizer.build_inputs_with_special_tokens(_A )
__A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 77 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
__A : Tuple = parent
__A : Optional[Any] = batch_size
__A : int = seq_length
__A : Dict = is_training
__A : List[str] = use_input_mask
__A : List[Any] = use_token_type_ids
__A : List[Any] = use_labels
__A : Optional[Any] = vocab_size
__A : int = hidden_size
__A : List[Any] = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : Tuple = intermediate_size
__A : Optional[Any] = hidden_act
__A : Any = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Union[str, Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : List[Any] = type_sequence_label_size
__A : Any = initializer_range
__A : Any = num_labels
__A : Optional[Any] = num_choices
__A : Any = scope
def UpperCAmelCase_ ( self ):
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : List[str] = None
if self.use_input_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__A : Tuple = None
if self.use_token_type_ids:
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Dict = None
__A : Union[str, Any] = None
__A : str = None
if self.use_labels:
__A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : str = ids_tensor([self.batch_size] , self.num_choices )
__A : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A ):
__A : int = BioGptModel(config=_A )
model.to(_A )
model.eval()
__A : Optional[Any] = model(_A , attention_mask=_A )
__A : Tuple = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Any = BioGptForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , *_A ):
__A : Any = BioGptModel(config=_A )
model.to(_A )
model.eval()
# create attention mask
__A : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=_A )
__A : Optional[int] = self.seq_length // 2
__A : List[Any] = 0
# first forward pass
__A : Optional[int] = model(_A , attention_mask=_A ).to_tuple()
# create hypothetical next token and extend to next_input_ids
__A : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__A : Dict = ids_tensor((1,) , _A ).item() + 1
__A : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__A : int = random_other_next_tokens
# append to next input_ids and attn_mask
__A : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_A )] , dim=1 , )
# get two different outputs
__A : Optional[Any] = model(_A , attention_mask=_A )['last_hidden_state']
__A : Any = model(_A , past_key_values=_A , attention_mask=_A )['last_hidden_state']
# select random slice
__A : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : int = output_from_no_past[:, -1, random_slice_idx].detach()
__A : Dict = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , *_A ):
__A : str = BioGptModel(config=_A ).to(_A ).eval()
__A : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=_A )
# first forward pass
__A : Optional[Any] = model(_A , attention_mask=_A , use_cache=_A )
__A : List[Any] = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
__A : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : Tuple = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention_mask
__A : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__A : Union[str, Any] = model(_A , attention_mask=_A )['last_hidden_state']
__A : Any = model(_A , attention_mask=_A , past_key_values=_A )[
'last_hidden_state'
]
# select random slice
__A : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
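# Hedged illustration of the invariant the two cache tests above verify: with
# causal attention, attending from only the new positions over the full key/value
# history must reproduce the corresponding rows of a full-sequence forward pass.
# Toy single-head numpy attention, not the BioGpt implementation.
import numpy as np
def _causal_attention(q, k, v):
    # q: (t_q, d); k, v: (t_k, d); query i may see keys 0 .. t_k - t_q + i
    scores = q @ k.T / np.sqrt(q.shape[-1])
    t_q, t_k = scores.shape
    scores[np.triu(np.ones((t_q, t_k), dtype=bool), k=t_k - t_q + 1)] = -np.inf
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    return (weights / weights.sum(axis=-1, keepdims=True)) @ v
_rng = np.random.default_rng(0)
_q, _k, _v = (_rng.normal(size=(8, 16)) for _ in range(3))
_full = _causal_attention(_q, _k, _v)
_incremental = _causal_attention(_q[5:], _k, _v)  # last 3 queries over cached k/v
assert np.allclose(_full[5:], _incremental, atol=1e-6)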
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , *_A , _A=False ):
__A : int = BioGptForCausalLM(_A )
model.to(_A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__A : List[Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase_ ( self , _A , *_A ):
__A : List[str] = BioGptModel(_A )
__A : Optional[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
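# Hedged sketch of the residual-scaling rule the weight-initialization check
# above encodes: "c_proj"-style output projections are drawn with a standard
# deviation shrunk by sqrt(2 * num_hidden_layers), following the GPT-2 recipe.
# The default values below are illustrative, not read from BioGptConfig.
import math
import torch
def _scaled_init_(weight, initializer_range=0.02, num_hidden_layers=24):
    std = initializer_range / math.sqrt(2 * num_hidden_layers)
    torch.nn.init.normal_(weight, mean=0.0, std=std)
_w = torch.empty(1024, 1024)
_scaled_init_(_w)  # _w.std() is now ~0.02 / sqrt(48) ≈ 0.0029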
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , *_A ):
__A : List[str] = self.num_labels
__A : List[Any] = BioGptForTokenClassification(_A )
model.to(_A )
model.eval()
__A : Tuple = model(_A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase : Union[str, Any] = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase : List[Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
def UpperCAmelCase_ ( self ):
__A : Optional[int] = BioGptModelTester(self )
__A : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : int = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_A )
def UpperCAmelCase_ ( self ):
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_A , gradient_checkpointing=_A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_A )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_A )
@slow
def UpperCAmelCase_ ( self ):
__A : List[str] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(_A )
__A : Tuple = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__A : Dict = 'left'
# Define PAD Token = EOS Token = 50256
__A : int = tokenizer.eos_token
__A : Any = model.config.eos_token_id
# use different length sentences to test batching
__A : Optional[Any] = [
'Hello, my dog is a little',
'Today, I',
]
__A : int = tokenizer(_A , return_tensors='pt' , padding=_A )
__A : Dict = inputs['input_ids'].to(_A )
__A : Tuple = model.generate(
input_ids=_A , attention_mask=inputs['attention_mask'].to(_A ) , )
__A : int = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(_A )
__A : Optional[int] = model.generate(input_ids=_A )
__A : Any = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
__A : Optional[int] = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(_A )
__A : Optional[int] = model.generate(input_ids=_A , max_length=model.config.max_length - num_paddings )
__A : Any = tokenizer.batch_decode(_A , skip_special_tokens=_A )
__A : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_A )
__A : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=_A )
__A : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , [non_padded_sentence, padded_sentence] )
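# Why the batched-generation test above forces padding_side='left': a
# decoder-only model continues from the final position of each row, so the
# real tokens must end at the last slot and the padding must sit in front.
# A hedged toy with made-up token ids:
_prompts = [[5, 6, 7, 8], [9, 10]]
_pad_id = 0
_width = max(len(p) for p in _prompts)
_left_padded = [[_pad_id] * (_width - len(p)) + p for p in _prompts]
assert _left_padded == [[5, 6, 7, 8], [0, 0, 9, 10]]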
@slow
def UpperCAmelCase_ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[str] = BioGptModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = 3
__A : List[Any] = input_dict['input_ids']
__A : Optional[int] = input_ids.ne(1 ).to(_A )
__A : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[int] = BioGptForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : List[str] = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[Any] = 3
__A : List[Any] = 'multi_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(_A )
__A : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : Dict = BioGptForSequenceClassification(_A )
model.to(_A )
model.eval()
__A : Dict = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
__A : int = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
__A : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
__A : List[str] = model(_A )[0]
__A : Tuple = 42384
__A : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _A )
__A : Union[str, Any] = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
__A : List[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__A : List[str] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(_A )
torch.manual_seed(0 )
__A : Any = tokenizer('COVID-19 is' , return_tensors='pt' ).to(_A )
__A : Dict = model.generate(
**_A , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=_A , )
__A : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=_A )
__A : Dict = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(_A , _A )
| 710 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[str] = BertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
__A : Any = getattr(_A , normalizer_state.pop('type' ) )
__A : Union[str, Any] = do_lower_case
__A : Optional[int] = strip_accents
__A : List[Any] = tokenize_chinese_chars
__A : int = normalizer_class(**_A )
__A : Union[str, Any] = do_lower_case
def UpperCAmelCase_ ( self , token_ids_a , token_ids_b=None ):
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b is not None:
output += token_ids_b + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , token_ids_a , token_ids_b = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
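# Hedged illustration of the two helpers above: a sentence pair is laid out as
# [CLS] A [SEP] B [SEP], with segment id 0 covering the first sentence plus its
# special tokens and segment id 1 covering the rest. Token ids are made up.
a_ids, b_ids = [7, 8, 9], [11, 12]
cls_id, sep_id = 101, 102
example_input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
example_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert len(example_input_ids) == len(example_type_ids) == 9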
def UpperCAmelCase_ ( self , _A , _A = None ):
__A : int = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 77 | 0 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
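# How the one-liner above works: '%%' collapses to a literal '%' and '%r'
# splices in the template's own repr, so formatting the string with itself
# reproduces the print statement. The first run emits a single-quoted variant,
# which is then an exact fixed point of the transformation:
s = 'print((lambda quine: quine %% quine)(%r))'
print(s % s)  # emits the self-reproducing print statement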
| 711 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ):
debug_launcher(test_ops.main )
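# debug_launcher spawns the given entry point across local CPU processes, which
# is what both tests above rely on. A hedged usage sketch: the num_processes
# keyword and process_index attribute are assumed from the accelerate API and
# worth checking against the installed version.
from accelerate import Accelerator, debug_launcher
def _worker():
    accelerator = Accelerator()
    print(f'hello from process {accelerator.process_index}')
if __name__ == '__main__':
    debug_launcher(_worker, num_processes=2)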
| 77 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
@is_pipeline_test
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
@require_torch
def UpperCAmelCase_ ( self ):
__A : Dict = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__A : Tuple = image_classifier(_A , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_A ) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
__A : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
] , )
@require_tf
def UpperCAmelCase_ ( self ):
__A : Any = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
__A : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__A : int = image_classifier(_A , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(_A ) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
__A : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
[
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
{'score': 0.3_3_3, 'label': ANY(_A )},
],
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
__A : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__A : Dict = image_classifier(_A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_A ) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
__A : Optional[int] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase_ ( self ):
__A : str = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__A : Optional[int] = image_classifier(_A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(_A ) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
__A : str = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(_A ) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
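# Hedged numpy sketch of the scoring rule behind the pipeline above: the image
# and each candidate label are embedded, cosine similarities are scaled by a
# temperature (CLIP uses a learned logit scale, ~100 here), and a softmax over
# the candidate set yields the scores. Embeddings are random stand-ins, not
# real CLIP outputs.
import numpy as np
_rng = np.random.default_rng(0)
_image_emb = _rng.normal(size=16)
_label_embs = _rng.normal(size=(3, 16))  # e.g. 'cat', 'plane', 'remote'
def _unit(x):
    return x / np.linalg.norm(x, axis=-1, keepdims=True)
_logits = 100.0 * _unit(_label_embs) @ _unit(_image_emb)
_scores = np.exp(_logits - _logits.max())
_scores /= _scores.sum()
print(dict(zip(['cat', 'plane', 'remote'], _scores.round(3))))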
| 712 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
seq_length = 16
batch_size = len(input_texts )
num_max_text_queries = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
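# Hedged sketch of the batching rule the nested-text test above checks:
# per-image query lists of unequal length are padded to the longest list (the
# processor pads with empty strings, if memory serves), so the tokenized batch
# has shape (num_images * max_queries, seq_length).
_input_texts = [['cat', 'nasa badge'], ['person']]
_max_queries = max(len(texts) for texts in _input_texts)
_padded = [texts + [''] * (_max_queries - len(texts)) for texts in _input_texts]
_flat_queries = [t for texts in _padded for t in texts]
assert len(_flat_queries) == len(_input_texts) * _max_queries  # 4 rows of ids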
| 77 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Tuple:
for attribute in key.split('.' ):
__A : Dict = getattr(a , a )
if weight_type is not None:
__A : Any = getattr(a , a ).shape
else:
__A : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__A : Union[str, Any] = value
elif weight_type == "weight_g":
__A : Dict = value
elif weight_type == "weight_v":
__A : Optional[int] = value
elif weight_type == "bias":
__A : int = value
elif weight_type == "running_mean":
__A : Union[str, Any] = value
elif weight_type == "running_var":
__A : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
__A : Any = value
elif weight_type == "inv_freq":
__A : Optional[Any] = value
else:
__A : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Union[str, Any]:
__A : Any = []
__A : Optional[int] = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__A : int = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__A : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : Optional[Any] = True
if "*" in mapped_key:
__A : str = name.split(a )[0].split('.' )[-2]
__A : int = mapped_key.replace('*' , a )
if "pos_bias_u" in name:
__A : Optional[int] = None
elif "pos_bias_v" in name:
__A : Dict = None
elif "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Dict = 'weight_v'
elif "bias" in name:
__A : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : int = 'weight'
elif "running_mean" in name:
__A : str = 'running_mean'
elif "inv_freq" in name:
__A : List[Any] = 'inv_freq'
elif "running_var" in name:
__A : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[Any] = 'num_batches_tracked'
else:
__A : List[str] = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"""Unused weights: {unused_weights}""" )
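# Hedged illustration of the '*'-wildcard renaming performed above: the layer
# index is recovered from the fairseq parameter name and spliced into the HF
# key template. The names below are example values, not read from a checkpoint.
_name = 'encoder.layers.3.self_attn.linear_q.weight'
_key, _mapped_key = 'self_attn.linear_q', 'encoder.layers.*.self_attn.linear_q'
_layer_index = _name.split(_key)[0].split('.')[-2]
print(_mapped_key.replace('*', _layer_index))  # encoder.layers.3.self_attn.linear_q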
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Any:
__A : str = full_name.split('conv_layers.' )[-1]
__A : str = name.split('.' )
__A : Dict = int(items[0] )
__A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__A : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__A : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=None , a=True ) -> Any:
if config_path is not None:
__A : Tuple = WavaVecaConformerConfig.from_pretrained(a , hidden_act='swish' )
else:
__A : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__A : Dict = 'rotary'
if is_finetuned:
if dict_path:
__A : Dict = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__A : int = target_dict.pad_index
__A : List[Any] = target_dict.bos_index
__A : Any = target_dict.eos_index
__A : Dict = len(target_dict.symbols )
__A : Optional[Any] = os.path.join(a , 'vocab.json' )
if not os.path.isdir(a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a ) )
return
os.makedirs(a , exist_ok=a )
__A : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__A : int = 0
__A : Optional[Any] = 1
with open(a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(a , a )
__A : Optional[Any] = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a , )
__A : Tuple = True if config.feat_extract_norm == 'layer' else False
__A : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__A : Optional[int] = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__A : List[Any] = WavaVecaConformerForCTC(a )
else:
__A : List[Any] = WavaVecaConformerForPreTraining(a )
if is_finetuned:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__A : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__A : str = fairseq.tasks.setup_task(a )
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__A : Tuple = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Tuple:
for attribute in key.split('.' ):
__A : Dict = getattr(a , a )
if weight_type is not None:
__A : Any = getattr(a , a ).shape
else:
__A : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__A : Union[str, Any] = value
elif weight_type == "weight_g":
__A : Dict = value
elif weight_type == "weight_v":
__A : Optional[int] = value
elif weight_type == "bias":
__A : int = value
elif weight_type == "running_mean":
__A : Union[str, Any] = value
elif weight_type == "running_var":
__A : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
__A : Any = value
elif weight_type == "inv_freq":
__A : Optional[Any] = value
else:
__A : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Union[str, Any]:
__A : Any = []
__A : Optional[int] = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__A : int = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
__A : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : Optional[Any] = True
if "*" in mapped_key:
__A : str = name.split(a )[0].split('.' )[-2]
__A : int = mapped_key.replace('*' , a )
if "pos_bias_u" in name:
__A : Optional[int] = None
elif "pos_bias_v" in name:
__A : Dict = None
elif "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Dict = 'weight_v'
elif "bias" in name:
__A : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : int = 'weight'
elif "running_mean" in name:
__A : str = 'running_mean'
elif "inv_freq" in name:
__A : List[Any] = 'inv_freq'
elif "running_var" in name:
__A : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
__A : Optional[Any] = 'num_batches_tracked'
else:
__A : List[str] = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a ) -> Any:
__A : str = full_name.split('conv_layers.' )[-1]
__A : str = name.split('.' )
__A : Dict = int(items[0] )
__A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__A : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__A : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__A : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=None , a=True ) -> Any:
if config_path is not None:
__A : Tuple = WavaVecaConformerConfig.from_pretrained(a , hidden_act='swish' )
else:
__A : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__A : Dict = 'rotary'
if is_finetuned:
if dict_path:
__A : Dict = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__A : int = target_dict.pad_index
__A : List[Any] = target_dict.bos_index
__A : Any = target_dict.eos_index
__A : Dict = len(target_dict.symbols )
__A : Optional[Any] = os.path.join(a , 'vocab.json' )
if not os.path.isdir(a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a ) )
return
os.makedirs(a , exist_ok=a )
__A : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__A : int = 0
__A : Optional[Any] = 1
with open(a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(a , a )
__A : Optional[Any] = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a , )
__A : Tuple = True if config.feat_extract_norm == 'layer' else False
__A : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
__A : Optional[int] = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
__A : List[Any] = WavaVecaConformerForCTC(a )
else:
__A : List[Any] = WavaVecaConformerForPreTraining(a )
if is_finetuned:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__A : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__A : str = fairseq.tasks.setup_task(a )
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
__A : Tuple = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 77 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , a , )
if isinstance(a , torch.Tensor ):
return image
elif isinstance(a , PIL.Image.Image ):
__A : Dict = [image]
if isinstance(image[0] , PIL.Image.Image ):
w, h = image[0].size
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__A : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__A : Tuple = np.concatenate(a , axis=0 )
__A : str = np.array(a ).astype(np.floataa ) / 255.0
__A : Optional[int] = image.transpose(0 , 3 , 1 , 2 )
__A : Optional[Any] = 2.0 * image - 1.0
__A : List[Any] = torch.from_numpy(a )
elif isinstance(image[0] , torch.Tensor ):
__A : Tuple = torch.cat(a , dim=0 )
return image
def _SCREAMING_SNAKE_CASE ( a ) -> Any:
if isinstance(a , torch.Tensor ):
return mask
elif isinstance(a , PIL.Image.Image ):
__A : str = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
        __A , __A : int = mask[0].size
        __A , __A : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__A : List[Any] = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
__A : Dict = np.concatenate(a , axis=0 )
__A : Optional[Any] = mask.astype(np.floataa ) / 255.0
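        # binarize the mask at 0.5: values below map to 0 (regions to inpaint), the rest to 1 (regions kept)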
__A : Tuple = 0
__A : Optional[Any] = 1
__A : Any = torch.from_numpy(a )
elif isinstance(mask[0] , torch.Tensor ):
__A : Optional[int] = torch.cat(a , dim=0 )
return mask
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : UNetaDModel
UpperCamelCase : RePaintScheduler
def __init__( self , _A , _A ):
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self , _A , _A , _A = 250 , _A = 0.0 , _A = 10 , _A = 10 , _A = None , _A = "pil" , _A = True , ):
__A : Tuple = image
__A : Tuple = _preprocess_image(_A )
__A : Tuple = original_image.to(device=self.device , dtype=self.unet.dtype )
__A : List[Any] = _preprocess_mask(_A )
__A : Optional[int] = mask_image.to(device=self.device , dtype=self.unet.dtype )
__A : Optional[Any] = original_image.shape[0]
        # sample Gaussian noise to begin the loop
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__A : Optional[int] = original_image.shape
__A : Dict = randn_tensor(_A , generator=_A , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_A , _A , _A , self.device )
__A : Tuple = eta
__A : Any = self.scheduler.timesteps[0] + 1
__A : Optional[Any] = generator[0] if isinstance(_A , _A ) else generator
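        # RePaint resampling: the timestep schedule repeatedly jumps back in time. On a revisit
        # (t >= t_last) the sample is re-noised with undo_step; otherwise a regular denoising step
        # runs, harmonizing the inpainted region with the known pixels of the original image.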
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__A : Tuple = self.unet(_A , _A ).sample
# compute previous image: x_t -> x_t-1
__A : List[Any] = self.scheduler.step(_A , _A , _A , _A , _A , _A ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__A : Union[str, Any] = self.scheduler.undo_step(_A , _A , _A )
__A : Any = t
__A : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
__A : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__A : Tuple = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 714 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A( snake_case__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( _A ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self ):
raise NotImplementedError()
| 77 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : List[str] = '''https://openaipublic.azureedge.net/jukebox/models/'''
UpperCAmelCase : str = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def _SCREAMING_SNAKE_CASE ( a ) -> int:
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__A : List[Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__A : Union[str, Any] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__A : Union[str, Any] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__A : List[Any] = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__A : Optional[Any] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
__A : Optional[Any] = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__A : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
__A : Optional[int] = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
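# Illustrative traces of replace_key (the input keys are hypothetical):
#   replace_key('prime_state_ln.bias') -> 'encoder.final_layer_norm.bias'
#   replace_key('prior.x_out.weight')  -> 'prior.fc_proj_out.weight'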
def _SCREAMING_SNAKE_CASE ( a , a , a , a ) -> Any:
__A : Optional[Any] = {}
import re
__A : List[str] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__A : Union[str, Any] = re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : List[Any] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__A : Optional[Any] = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__A : Optional[int] = re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : List[str] = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__A : str = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
__A : Union[str, Any] = re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__A : Optional[int] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(a ):
__A : Dict = re_encoder_block_conv_in.match(a )
__A : Dict = regex_match.groups()
__A : Tuple = int(groups[2] ) * 2 + int(groups[3] )
__A : Union[str, Any] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__A : Optional[Any] = re_encoder_block_conv_in.sub(a , a )
elif re_encoder_block_resnet.fullmatch(a ):
__A : int = re_encoder_block_resnet.match(a )
__A : Union[str, Any] = regex_match.groups()
__A : Dict = int(groups[2] ) * 2 + int(groups[3] )
__A : str = {'1': 1, '3': 2}[groups[-2]]
__A : Tuple = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__A : int = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : Tuple = prefix + resnet_block
__A : str = re_encoder_block_resnet.sub(a , a )
elif re_encoder_block_proj_out.fullmatch(a ):
__A : List[Any] = re_encoder_block_proj_out.match(a )
__A : Tuple = regex_match.groups()
__A : List[Any] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__A : List[str] = re_encoder_block_proj_out.sub(a , a )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(a ):
__A : Tuple = re_decoder_block_conv_out.match(a )
__A : Dict = regex_match.groups()
__A : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
__A : Tuple = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__A : int = re_decoder_block_conv_out.sub(a , a )
elif re_decoder_block_resnet.fullmatch(a ):
__A : Any = re_decoder_block_resnet.match(a )
__A : Union[str, Any] = regex_match.groups()
__A : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
__A : int = {'1': 1, '3': 2}[groups[-2]]
__A : Any = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__A : int = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : Union[str, Any] = prefix + resnet_block
__A : str = re_decoder_block_resnet.sub(a , a )
elif re_decoder_block_proj_in.fullmatch(a ):
__A : List[Any] = re_decoder_block_proj_in.match(a )
__A : Dict = regex_match.groups()
__A : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__A : Any = re_decoder_block_proj_in.sub(a , a )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(a ):
__A : Optional[Any] = re_prior_cond_conv_out.match(a )
__A : Tuple = regex_match.groups()
__A : str = int(groups[1] ) * 2 + int(groups[2] ) - 2
__A : Tuple = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__A : str = re_prior_cond_conv_out.sub(a , a )
elif re_prior_cond_resnet.fullmatch(a ):
__A : Optional[int] = re_prior_cond_resnet.match(a )
__A : Optional[Any] = regex_match.groups()
__A : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2
__A : Dict = {'1': 1, '3': 2}[groups[-2]]
__A : Dict = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__A : Tuple = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__A : List[Any] = prefix + resnet_block
__A : str = re_prior_cond_resnet.sub(a , a )
elif re_prior_cond_proj_in.fullmatch(a ):
__A : Optional[int] = re_prior_cond_proj_in.match(a )
__A : Union[str, Any] = regex_match.groups()
__A : int = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__A : int = re_prior_cond_proj_in.sub(a , a )
# keep original key
else:
__A : Dict = original_key
__A : Any = replace_key(a )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
__A : Dict = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
__A : List[Any] = original_key
__A : List[str] = original_key
__A : Union[str, Any] = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a=None , a=None ) -> Any:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
__A : Any = requests.get(F"""{PREFIX}{file}""" , allow_redirects=a )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=a )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , 'wb' ).write(r.content )
__A : Tuple = MODEL_MAPPING[model_name.split('/' )[-1]]
__A : Any = JukeboxConfig.from_pretrained(a )
__A : Optional[Any] = JukeboxModel(a )
__A : str = []
__A : str = {}
for i, dict_name in enumerate(a ):
__A : List[Any] = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model']
__A : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
__A : Optional[int] = old_dic[k]
elif k.endswith('.w' ):
__A : List[str] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__A : List[str] = old_dic[k]
else:
__A : Dict = old_dic[k]
__A : List[str] = 'vqvae' if i == 0 else F"""priors.{3 - i}"""
__A : Optional[Any] = fix_jukebox_keys(a , model.state_dict() , a , a )
weight_dict.append(a )
__A : Dict = weight_dict.pop(0 )
model.vqvae.load_state_dict(a )
for i in range(len(a ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(a ).mkdir(exist_ok=a )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(a , a )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
UpperCAmelCase : Dict = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
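    # Example run (model weights are fetched on demand; convert_jukebox.py is an assumed filename
    # for this script, and jukebox-1b-lyrics is one of the MODEL_MAPPING keys above):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path jukebox-1b-lyrics-converted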
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase : int = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
__A : Optional[int] = set()
__A : Union[str, Any] = []
def parse_line(a ):
for line in fp:
if isinstance(a , a ):
__A : str = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(a ) > 0:
__A : int = '\n'.join(a )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(a )
buffer.clear()
continue
else:
__A : Optional[Any] = line.strip()
buffer.append(a )
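    # Artifacts arrive either as a plain directory of files (when collected directly inside a
    # GitHub Actions workflow) or as downloaded .zip archives; in both cases only the
    # "warnings.txt" member is parsed.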
if from_gh:
for filename in os.listdir(a ):
__A : Any = os.path.join(a , a )
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with open(a ) as fp:
parse_line(a )
else:
try:
with zipfile.ZipFile(a ) as z:
for filename in z.namelist():
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with z.open(a ) as fp:
parse_line(a )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def _SCREAMING_SNAKE_CASE ( a , a ) -> Any:
__A : Union[str, Any] = set()
__A : List[str] = [os.path.join(a , a ) for p in os.listdir(a ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a , a ) )
return selected_warnings
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
return values.split(',' )
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
UpperCAmelCase : str = parser.parse_args()
UpperCAmelCase : Tuple = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase : int = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 716 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Any = ShapEPipeline
UpperCamelCase : str = ['''prompt''']
UpperCamelCase : Tuple = ['''prompt''']
UpperCamelCase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : int = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 8
@property
def UpperCAmelCase_ ( self ):
__A : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : int = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__A : Optional[Any] = PriorTransformer(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__A : List[Any] = ShapERenderer(**_A )
return model
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_text_encoder
__A : List[Any] = self.dummy_tokenizer
__A : str = self.dummy_renderer
__A : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
__A : Any = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(_A ).startswith('mps' ):
__A : List[Any] = torch.manual_seed(_A )
else:
__A : Dict = torch.Generator(device=_A ).manual_seed(_A )
__A : int = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : Tuple = 'cpu'
__A : Any = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Tuple = pipe(**self.get_dummy_inputs(_A ) )
__A : int = output.images[0]
__A : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__A : Any = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self ):
__A : List[str] = torch_device == 'cpu'
__A : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCAmelCase_ ( self ):
__A : Any = self.get_dummy_components()
__A : Any = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Any = 1
__A : Dict = 2
__A : Tuple = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
__A : Optional[int] = batch_size * [inputs[key]]
__A : Optional[int] = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
__A : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__A : Dict = ShapEPipeline.from_pretrained('openai/shap-e' )
__A : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : str = torch.Generator(device=_A ).manual_seed(0 )
__A : Tuple = pipe(
'a shark' , generator=_A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 77 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase : Optional[int] = logging.getLogger()
def _SCREAMING_SNAKE_CASE ( a ) -> Optional[int]:
__A : Any = {}
__A : str = os.path.join(a , 'all_results.json' )
if os.path.exists(a ):
with open(a , 'r' ) as f:
__A : List[str] = json.load(a )
else:
raise ValueError(F"""can't find {path}""" )
return results
UpperCAmelCase : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _A( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
import xla_spawn
__A : str = self.get_auto_remove_tmp_dir()
__A : List[str] = F"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_A , 'argv' , _A ):
__A : Optional[Any] = time()
xla_spawn.main()
__A : Optional[Any] = time()
__A : str = get_results(_A )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCAmelCase_ ( self ):
import xla_spawn
__A : Optional[Any] = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(_A , 'argv' , _A ):
xla_spawn.main()
| 717 |
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if len(a ) != 2 or len(a[0] ) != 2 or len(a ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
__A : Optional[int] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
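# Hand-checked example: default_matrix_multiplication on [[1, 2], [3, 4]] and [[5, 6], [7, 8]]
# yields [[19, 22], [43, 50]].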
def _SCREAMING_SNAKE_CASE ( a , a ) -> str:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a ) )
]
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[int]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a ) )
]
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[list, list, list, list]:
if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
__A : str = len(a )
__A : List[Any] = matrix_length // 2
__A : List[str] = [[a[i][j] for j in range(a , a )] for i in range(a )]
__A : Dict = [
[a[i][j] for j in range(a , a )] for i in range(a , a )
]
__A : int = [[a[i][j] for j in range(a )] for i in range(a )]
__A : Any = [[a[i][j] for j in range(a )] for i in range(a , a )]
return top_left, top_right, bot_left, bot_right
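# For a 4x4 input, split_matrix returns four 2x2 quadrants in the order
# (top_left, top_right, bot_left, bot_right); odd-sized matrices are rejected above.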
def _SCREAMING_SNAKE_CASE ( a ) -> tuple[int, int]:
return len(a ), len(matrix[0] )
def _SCREAMING_SNAKE_CASE ( a ) -> None:
print('\n'.join(str(a ) for line in matrix ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if matrix_dimensions(a ) == (2, 2):
return default_matrix_multiplication(a , a )
__A , __A , __A , __A : str = split_matrix(a )
__A , __A , __A , __A : List[Any] = split_matrix(a )
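    # The seven recursive products below implement the classic Strassen identities:
    #   M1=(A11+A22)(B11+B22), M2=(A21+A22)B11, M3=A11(B12-B22), M4=A22(B21-B11),
    #   M5=(A11+A12)B22, M6=(A21-A11)(B11+B12), M7=(A12-A22)(B21+B22),
    # recombined as C11=M1+M4-M5+M7, C12=M3+M5, C21=M2+M4, C22=M1-M2+M3+M6.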
__A : Any = actual_strassen(a , matrix_subtraction(a , a ) )
__A : Tuple = actual_strassen(matrix_addition(a , a ) , a )
__A : List[str] = actual_strassen(matrix_addition(a , a ) , a )
__A : Optional[int] = actual_strassen(a , matrix_subtraction(a , a ) )
__A : Any = actual_strassen(matrix_addition(a , a ) , matrix_addition(a , a ) )
__A : Any = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) )
__A : List[Any] = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) )
__A : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a , a ) , a ) , a )
__A : Union[str, Any] = matrix_addition(a , a )
__A : str = matrix_addition(a , a )
__A : Dict = matrix_subtraction(matrix_subtraction(matrix_addition(a , a ) , a ) , a )
# construct the new matrix from our 4 quadrants
__A : List[Any] = []
for i in range(len(a ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if matrix_dimensions(a )[1] != matrix_dimensions(a )[0]:
__A : Dict = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a )
__A : int = matrix_dimensions(a )
__A : Any = matrix_dimensions(a )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__A : List[Any] = max(*a , *a )
__A : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(a ) ) ) )
__A : Union[str, Any] = matrixa
__A : Optional[int] = matrixa
    # Pad the matrices with zeros so that their dimensions match and are a power of 2
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__A : str = actual_strassen(a , a )
# Removing the additional zeros
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 77 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _A( nn.Module ):
"""simple docstring"""
def __init__( self , _A = 16 , _A = 88 , _A = None , _A = 1 , _A = 0.0 , _A = 32 , _A = None , _A = False , _A = None , _A = None , _A = "geglu" , _A = None , ):
super().__init__()
__A : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_A , attention_head_dim=_A , in_channels=_A , num_layers=_A , dropout=_A , norm_num_groups=_A , cross_attention_dim=_A , attention_bias=_A , sample_size=_A , num_vector_embeds=_A , activation_fn=_A , num_embeds_ada_norm=_A , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__A : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__A : Any = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__A : Dict = [1, 0]
def UpperCAmelCase_ ( self , _A , _A , _A=None , _A=None , _A=None , _A = True , ):
__A : List[str] = hidden_states
__A : Union[str, Any] = []
__A : Union[str, Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__A : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__A : str = self.transformer_index_for_condition[i]
__A : Optional[Any] = self.transformers[transformer_index](
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , return_dict=_A , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__A : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__A : Optional[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_A )
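# A hypothetical forward call (tensor names assumed): with 77 text-condition tokens and 257
# image-condition tokens concatenated along the sequence axis, each slice is routed to its
# own transformer:
#   out = dual_block(hidden_states, encoder_hidden_states=torch.cat([text_emb, image_emb], dim=1), timestep=t)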
| 718 |
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = []
__A : Tuple = []
__A : Union[str, Any] = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
__A : List[str] = len(a ) if (len(a ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(a ) , 'Postfix'.center(a ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(a ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(a ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(a ) == 0:
stack.append(a ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(a ) > 0 and stack[-1] != '(' and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(a ) # push x to stack
print(
x.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
while len(a ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(a )).ljust(a ) , (''.join(a )).ljust(a ) , sep=' | ' , ) # Output in tabular format
return "".join(a ) # return Postfix as str
def _SCREAMING_SNAKE_CASE ( a ) -> List[str]:
__A : List[Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(a ) ):
if infix[i] == "(":
__A : List[str] = ')' # change "(" to ")"
elif infix[i] == ")":
__A : Any = '(' # change ")" to "("
return (infix_2_postfix(''.join(a ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
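# Example: infix_2_prefix("a+b*c") returns "+a*bc" (reverse, swap parentheses, postfix, reverse).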
if __name__ == "__main__":
UpperCAmelCase : List[str] = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase : Union[str, Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 77 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Any = '''yolos'''
def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-1_2 , _A=[512, 864] , _A=16 , _A=3 , _A=True , _A=100 , _A=True , _A=False , _A=1 , _A=5 , _A=2 , _A=5 , _A=2 , _A=0.1 , **_A , ):
super().__init__(**_A )
__A : Any = hidden_size
__A : List[Any] = num_hidden_layers
__A : Tuple = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Optional[Any] = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : List[Any] = initializer_range
__A : List[Any] = layer_norm_eps
__A : List[Any] = image_size
__A : str = patch_size
__A : Dict = num_channels
__A : Dict = qkv_bias
__A : Optional[int] = num_detection_tokens
__A : Union[str, Any] = use_mid_position_embeddings
__A : Tuple = auxiliary_loss
# Hungarian matcher
__A : str = class_cost
__A : int = bbox_cost
__A : str = giou_cost
# Loss coefficients
__A : Optional[int] = bbox_loss_coefficient
__A : Optional[int] = giou_loss_coefficient
__A : Optional[int] = eos_coefficient
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-4
@property
def UpperCAmelCase_ ( self ):
return 12
| 719 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase : int = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = '''mask2former'''
UpperCamelCase : Any = ['''swin''']
UpperCamelCase : Union[str, Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , _A = None , _A = 256 , _A = 256 , _A = 256 , _A = 1024 , _A = "relu" , _A = 6 , _A = 10 , _A = 8 , _A = 0.0 , _A = 2048 , _A = False , _A = False , _A = 4 , _A = 255 , _A = 100 , _A = 0.1 , _A = 2.0 , _A = 5.0 , _A = 5.0 , _A = 12544 , _A = 3.0 , _A = 0.7_5 , _A = 0.0_2 , _A = 1.0 , _A = True , _A = [4, 8, 16, 32] , _A = None , **_A , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__A : Optional[int] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_A , _A ):
__A : Dict = backbone_config.pop('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[str] = config_class.from_dict(_A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : Optional[int] = backbone_config
__A : Optional[Any] = feature_size
__A : Any = mask_feature_size
__A : Optional[Any] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : Optional[Any] = activation_function
__A : List[Any] = encoder_layers
__A : Union[str, Any] = decoder_layers
__A : Dict = num_attention_heads
__A : Tuple = dropout
__A : Dict = dim_feedforward
__A : Tuple = pre_norm
__A : Dict = enforce_input_projection
__A : Optional[int] = common_stride
__A : Optional[Any] = ignore_value
__A : str = num_queries
__A : List[Any] = no_object_weight
__A : List[str] = class_weight
__A : List[Any] = mask_weight
__A : List[Any] = dice_weight
__A : Tuple = train_num_points
__A : Optional[Any] = oversample_ratio
__A : Union[str, Any] = importance_sample_ratio
__A : Union[str, Any] = init_std
__A : int = init_xavier_std
__A : Union[str, Any] = use_auxiliary_loss
__A : Union[str, Any] = feature_strides
__A : List[Any] = output_auxiliary_logits
__A : Optional[Any] = decoder_layers
super().__init__(**_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
return cls(
backbone_config=_A , **_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = copy.deepcopy(self.__dict__ )
__A : List[Any] = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
| 77 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCAmelCase : Union[str, Any] = '''pt'''
elif is_tf_available():
UpperCAmelCase : str = '''tf'''
else:
UpperCAmelCase : Union[str, Any] = '''jax'''
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = ByTaTokenizer
UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase_ ( self ):
super().setUp()
__A : Optional[int] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self ):
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def UpperCAmelCase_ ( self , **_A ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self , _A , _A=False , _A=20 , _A=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__A : List[str] = []
for i in range(len(_A ) ):
try:
__A : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__A : Optional[Any] = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__A : Optional[int] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__A : List[str] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__A : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
__A : Dict = [t[0] for t in toks]
# Ensure consistency
__A : Dict = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__A : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__A : Any = ' ' + output_txt
__A : str = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.ta_base_tokenizer
__A : Any = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__A : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def UpperCAmelCase_ ( self ):
__A : int = self.ta_base_tokenizer
__A : List[str] = 'Unicode €.'
__A : Dict = tokenizer(_A )
__A : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__A : Union[str, Any] = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__A : List[Any] = tokenizer('e è é ê ë' )
__A : Union[str, Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__A : Optional[int] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
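        # ByT5 maps each UTF-8 byte b to id b + 3 (ids 0-2 are reserved for the special tokens),
        # e.g. 'U' (0x55 = 85) -> 88 and '€' (bytes 0xE2 0x82 0xAC) -> 229, 133, 175 as asserted above.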
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.ta_base_tokenizer
__A : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__A : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__A : List[Any] = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__A : Optional[int] = list(batch.input_ids.numpy()[0] )
else:
__A : Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.ta_base_tokenizer
__A : Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def UpperCAmelCase_ ( self ):
__A : str = self.ta_base_tokenizer
__A : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
__A : Optional[Any] = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def UpperCAmelCase_ ( self ):
__A : str = self.ta_base_tokenizer
__A : Optional[int] = ['A long paragraph for summarization. </s>']
__A : Tuple = ['Summary of the text. </s>']
# fmt: off
__A : str = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__A : Dict = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__A : Dict = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def UpperCAmelCase_ ( self ):
# safety check on max_len default value so we are sure the test works
__A : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__A : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__A : Union[str, Any] = tempfile.mkdtemp()
__A : Tuple = ' He is very happy, UNwant\u00E9d,running'
__A : Any = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__A : int = tokenizer.__class__.from_pretrained(_A )
__A : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__A : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__A : Union[str, Any] = tempfile.mkdtemp()
__A : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__A : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__A : Any = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__A : Optional[int] = tokenizer.__class__.from_pretrained(_A )
__A : int = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__A : Any = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__A : Union[str, Any] = json.load(_A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__A : Any = json.load(_A )
__A : Optional[int] = [F"""<extra_id_{i}>""" for i in range(125 )]
__A : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
__A : str = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__A : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__A : str = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__A : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def UpperCAmelCase_ ( self ):
__A : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__A : str = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
__A : List[Any] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A : Dict = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__A : Tuple = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def UpperCAmelCase_ ( self ):
__A : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A : Dict = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__A : Dict = 0
__A : Tuple = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 720 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str = '''conditional_detr'''
UpperCamelCase : int = ['''past_key_values''']
UpperCamelCase : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _A=True , _A=None , _A=3 , _A=300 , _A=6 , _A=2048 , _A=8 , _A=6 , _A=2048 , _A=8 , _A=0.0 , _A=0.0 , _A=True , _A="relu" , _A=256 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1.0 , _A=False , _A="sine" , _A="resnet50" , _A=True , _A=False , _A=2 , _A=5 , _A=2 , _A=1 , _A=1 , _A=2 , _A=5 , _A=2 , _A=0.2_5 , **_A , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__A : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__A : Tuple = backbone_config.get('model_type' )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : List[Any] = config_class.from_dict(_A )
__A : Tuple = use_timm_backbone
__A : List[str] = backbone_config
__A : Dict = num_channels
__A : int = num_queries
__A : int = d_model
__A : str = encoder_ffn_dim
__A : List[str] = encoder_layers
__A : Optional[Any] = encoder_attention_heads
__A : Union[str, Any] = decoder_ffn_dim
__A : List[Any] = decoder_layers
__A : Optional[Any] = decoder_attention_heads
__A : Any = dropout
__A : Any = attention_dropout
__A : int = activation_dropout
__A : Optional[int] = activation_function
__A : Union[str, Any] = init_std
__A : Union[str, Any] = init_xavier_std
__A : Optional[Any] = encoder_layerdrop
__A : int = decoder_layerdrop
__A : List[str] = encoder_layers
__A : str = auxiliary_loss
__A : Union[str, Any] = position_embedding_type
__A : Optional[int] = backbone
__A : List[str] = use_pretrained_backbone
__A : List[Any] = dilation
# Hungarian matcher
__A : List[str] = class_cost
__A : Optional[int] = bbox_cost
__A : Dict = giou_cost
# Loss coefficients
__A : Optional[int] = mask_loss_coefficient
__A : Union[str, Any] = dice_loss_coefficient
__A : List[Any] = cls_loss_coefficient
__A : Dict = bbox_loss_coefficient
__A : Tuple = giou_loss_coefficient
__A : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def UpperCAmelCase_ ( self ):
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ):
return self.d_model
def UpperCAmelCase_ ( self ):
__A : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__A : Dict = self.backbone_config.to_dict()
__A : Union[str, Any] = self.__class__.model_type
return output
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCAmelCase_ ( self ):
return 1e-5
@property
def UpperCAmelCase_ ( self ):
return 12
| 77 | 0 |
def _SCREAMING_SNAKE_CASE ( a , a ) -> int:
if len(a ) != len(a ):
raise ValueError('String lengths must match!' )
__A : Tuple = 0
for chara, chara in zip(a , a ):
if chara != chara:
count += 1
return count
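# Hand-checked example: "karolin" vs "kathrin" differ at 3 positions (r/t, o/h, l/r).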
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _A( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
__A : List[str] = nn.Linear(3 , 4 )
__A : Optional[Any] = nn.BatchNormad(4 )
__A : List[Any] = nn.Linear(4 , 5 )
def UpperCAmelCase_ ( self , _A ):
return self.lineara(self.batchnorm(self.lineara(_A ) ) )
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , model.state_dict() )
__A : str = os.path.join(_A , 'index.json' )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__A : Optional[int] = os.path.join(_A , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(_A ) )
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase_ ( self ):
__A : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__A : Tuple = torch.randn(2 , 3 , dtype=_A )
with TemporaryDirectory() as tmp_dir:
__A : int = offload_weight(_A , 'weight' , _A , {} )
__A : Union[str, Any] = os.path.join(_A , 'weight.dat' )
self.assertTrue(os.path.isfile(_A ) )
self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A ).split('.' )[1]}} )
__A : List[str] = load_offloaded_weight(_A , index['weight'] )
self.assertTrue(torch.equal(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : int = ModelForTest()
__A : Union[str, Any] = model.state_dict()
__A : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
__A : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
__A : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
__A : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
__A : Optional[int] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_A , _A )
# Duplicates are removed
__A : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
# Every key is there with the right value
self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_A , weight_map[key] ) )
def UpperCAmelCase_ ( self ):
__A : Dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
__A : str = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2} )
__A : Optional[Any] = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
__A : Any = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2} )
| 77 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCAmelCase : Optional[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( a , a=1.0 , a=None , a=None ) -> Dict:
if rng is None:
__A : List[Any] = global_rng
__A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
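# floats_list(shape) builds shape[0] Python lists of shape[1] uniform floats
# scaled by `scale` -- a cheap stand-in for raw audio waveforms in these tests.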
@require_torch
@require_torchaudio
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=400 , _A=2000 , _A=10 , _A=160 , _A=8 , _A=0.0 , _A=4000 , _A=False , _A=True , ):
__A : int = parent
__A : Optional[Any] = batch_size
__A : Any = min_seq_length
__A : Optional[Any] = max_seq_length
__A : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
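        # step size so that sample lengths climb evenly from min_seq_length to
        # max_seq_length across the batch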
__A : Optional[Any] = padding_value
__A : Optional[int] = sampling_rate
__A : Optional[int] = return_attention_mask
__A : Optional[int] = do_normalize
__A : Optional[int] = feature_size
__A : Union[str, Any] = chunk_length
__A : Dict = hop_length
def UpperCAmelCase_ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase_ ( self , _A=False , _A=False ):
def _flatten(_A ):
return list(itertools.chain(*_A ) )
if equal_length:
__A : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__A : List[str] = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = WhisperFeatureExtractionTester(self )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__A : int = feat_extract_first.save_pretrained(_A )[0]
check_json_file_has_correct_format(_A )
__A : Optional[int] = self.feature_extraction_class.from_pretrained(_A )
__A : Dict = feat_extract_first.to_dict()
__A : int = feat_extract_second.to_dict()
__A : int = feat_extract_first.mel_filters
__A : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_A , _A ) )
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__A : Dict = os.path.join(_A , 'feat_extract.json' )
feat_extract_first.to_json_file(_A )
__A : str = self.feature_extraction_class.from_json_file(_A )
__A : Optional[Any] = feat_extract_first.to_dict()
__A : Dict = feat_extract_second.to_dict()
__A : Union[str, Any] = feat_extract_first.mel_filters
__A : Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_A , _A ) )
self.assertEqual(_A , _A )
def UpperCAmelCase_ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__A : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__A : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__A : Optional[int] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
__A : Any = feature_extractor(_A , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__A : Dict = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__A : Any = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
__A : Any = feature_extractor(_A , return_tensors='np' ).input_features
__A : int = feature_extractor(_A , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__A : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__A : Tuple = np.asarray(_A )
__A : Union[str, Any] = feature_extractor(_A , return_tensors='np' ).input_features
__A : List[Any] = feature_extractor(_A , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test truncation required
__A : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__A : Optional[Any] = [np.asarray(_A ) for speech_input in speech_inputs]
__A : str = [x[: feature_extractor.n_samples] for x in speech_inputs]
__A : Any = [np.asarray(_A ) for speech_input in speech_inputs_truncated]
__A : str = feature_extractor(_A , return_tensors='np' ).input_features
__A : List[Any] = feature_extractor(_A , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
def UpperCAmelCase_ ( self ):
import torch
__A : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __A : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.float64 )
__A : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__A : Tuple = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
__A : Any = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def UpperCAmelCase_ ( self , _A ):
__A : List[str] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__A : List[Any] = ds.sort('id' ).select(range(_A ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCAmelCase_ ( self ):
# fmt: off
__A : List[Any] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
__A : Tuple = self._load_datasamples(1 )
__A : List[str] = WhisperFeatureExtractor()
__A : Any = feature_extractor(_A , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _A , atol=1e-4 ) )
def UpperCAmelCase_ ( self ):
__A : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__A : Tuple = self._load_datasamples(1 )[0]
__A : Tuple = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__A : Tuple = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_A )[0]
self.assertTrue(np.all(np.mean(_A ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A ) - 1 ) < 1e-3 ) )
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A ):
__A : Any = data
def __iter__( self ):
for element in self.data:
yield element
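# Minimal IterableDataset wrapper: iterable datasets expose no batch_sampler,
# which is exactly the edge case the even_batches override tests below probe.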
def _SCREAMING_SNAKE_CASE ( a=True ) -> Any:
__A : List[Any] = Accelerator(even_batches=a )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( a , a , a , a = False ) -> str:
if iterable:
__A : int = DummyIterableDataset(torch.as_tensor(range(a ) ) )
else:
__A : Optional[Any] = TensorDataset(torch.as_tensor(range(a ) ) )
__A : Optional[Any] = DataLoader(a , batch_size=a )
__A : Optional[int] = accelerator.prepare(a )
return dl
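# The helper above wraps a range of integers in a map-style or iterable
# dataset and runs it through accelerator.prepare() so each process receives
# its shard of the batches.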
def _SCREAMING_SNAKE_CASE ( a , a , a , a , a , ) -> Union[str, Any]:
__A : Optional[int] = create_dataloader(accelerator=a , dataset_size=a , batch_size=a )
__A : Tuple = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : int = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : str = create_accelerator(even_batches=a )
verify_dataloader_batch_sizes(
a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : Optional[Any] = create_accelerator(even_batches=a )
__A : str = torch.nn.Linear(1 , 1 )
__A : Optional[int] = accelerator.prepare(a )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(a ):
__A : Dict = ddp_model(batch[0].float() )
__A : List[str] = output.sum()
loss.backward()
batch_idxs.append(a )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( a ) -> List[Any]:
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for multi-GPU" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__A : int = True
__A : Union[str, Any] = False
__A : Optional[int] = create_accelerator(even_batches=a )
__A : int = torch.nn.Linear(1 , 1 )
__A : List[Any] = accelerator.prepare(a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
__A : Optional[int] = create_dataloader(a , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : List[str] = train_dl.batch_sampler.even_batches
__A : Dict = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Any = True
__A : List[Any] = False
__A : Tuple = create_accelerator(even_batches=a )
__A : List[str] = torch.nn.Linear(1 , 1 )
__A : Optional[Any] = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
__A : int = create_dataloader(a , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
__A : Tuple = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Dict:
__A : Any = create_accelerator()
__A : Union[str, Any] = torch.nn.Linear(1 , 1 )
__A : str = accelerator.prepare(a )
create_dataloader(a , dataset_size=3 , batch_size=1 , iterable=a )
with warnings.catch_warnings(record=a ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=a ):
pass
assert issubclass(w[-1].category , a )
assert "only supported for map-style datasets" in str(w[-1].message )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : str = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
__A : int = accelerator.state.distributed_type
__A : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(a )
__A : str = original_state
if __name__ == "__main__":
main()
| 77 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : Tuple = '''unispeech-sat'''
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__A : Tuple = hidden_size
__A : int = feat_extract_norm
__A : Optional[Any] = feat_extract_activation
__A : List[Any] = list(_A )
__A : Any = list(_A )
__A : Dict = list(_A )
__A : Any = conv_bias
__A : Any = num_conv_pos_embeddings
__A : str = num_conv_pos_embedding_groups
__A : Tuple = len(self.conv_dim )
__A : Tuple = num_hidden_layers
__A : str = intermediate_size
__A : Dict = hidden_act
__A : int = num_attention_heads
__A : Dict = hidden_dropout
__A : int = attention_dropout
__A : Tuple = activation_dropout
__A : int = feat_proj_dropout
__A : Tuple = final_dropout
__A : int = layerdrop
__A : List[str] = layer_norm_eps
__A : List[str] = initializer_range
__A : Union[str, Any] = vocab_size
__A : Union[str, Any] = num_clusters
__A : int = do_stable_layer_norm
__A : Optional[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Union[str, Any] = apply_spec_augment
__A : Optional[Any] = mask_time_prob
__A : Optional[int] = mask_time_length
__A : Tuple = mask_time_min_masks
__A : Any = mask_feature_prob
__A : Union[str, Any] = mask_feature_length
__A : List[str] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : List[Any] = num_codevectors_per_group
__A : str = num_codevector_groups
__A : int = contrastive_logits_temperature
__A : Tuple = feat_quantizer_dropout
__A : str = num_negatives
__A : Tuple = codevector_dim
__A : List[str] = proj_codevector_dim
__A : Optional[int] = diversity_loss_weight
# ctc loss
__A : Any = ctc_loss_reduction
__A : List[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__A : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__A : Tuple = list(_A )
__A : Tuple = list(_A )
__A : List[Any] = list(_A )
__A : List[Any] = xvector_output_dim
@property
def UpperCAmelCase_ ( self ):
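        # Product of all convolutional strides, i.e. the feature encoder's
        # total downsampling factor (5*2*2*2*2*2*2 = 320 with the defaults).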
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 701 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : str = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = '''codegen'''
UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _A=50400 , _A=2048 , _A=2048 , _A=4096 , _A=28 , _A=16 , _A=64 , _A=None , _A="gelu_new" , _A=0.0 , _A=0.0 , _A=0.0 , _A=1e-5 , _A=0.0_2 , _A=True , _A=50256 , _A=50256 , _A=False , **_A , ):
__A : Any = vocab_size
__A : Tuple = n_ctx
__A : Union[str, Any] = n_positions
__A : Optional[Any] = n_embd
__A : Any = n_layer
__A : Dict = n_head
__A : Union[str, Any] = n_inner
__A : List[Any] = rotary_dim
__A : str = activation_function
__A : Any = resid_pdrop
__A : Tuple = embd_pdrop
__A : Tuple = attn_pdrop
__A : Union[str, Any] = layer_norm_epsilon
__A : str = initializer_range
__A : Optional[Any] = use_cache
__A : Union[str, Any] = bos_token_id
__A : Tuple = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A = "default" , _A = None , _A = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , 'pad_token_id' , _A ):
# TODO: how to do that better?
__A : Dict = 0
@property
def UpperCAmelCase_ ( self ):
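        # ONNX dynamic axes: when past key/values are exported, the attention
        # mask spans past + current tokens, hence the different axis-1 label.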
__A : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
__A : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__A : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self ):
return self._config.n_layer
@property
def UpperCAmelCase_ ( self ):
return self._config.n_head
def UpperCAmelCase_ ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
__A : Any = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
# We need to order the input in the way they appears in the forward()
__A : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A , __A : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__A : Any = seqlen + 2
__A : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
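                # one all-zero (key, value) pair per layer, each tensor of
                # shape (batch, num_heads, past_len, head_dim)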
__A : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
__A : Tuple = common_inputs['attention_mask']
if self.use_past:
__A : str = ordered_inputs['attention_mask'].dtype
__A : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self ):
return 13
| 77 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> Optional[int]:
__A : int = AutoConfig.from_pretrained(a )
__A : Union[str, Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=a )
__A : int = checkpoints.load_tax_checkpoint(a )
__A : Tuple = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
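    # T5 v1.1 / LongT5 checkpoints use a gated MLP with two input projections
    # (wi_0, wi_1); this flag selects the matching parameter names below.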
if config.model_type == "t5":
__A : List[Any] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__A : List[str] = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A : Any = 'TransientGlobalSelfAttention'
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
__A : int = F"""layers_{str(a )}"""
# Self-Attention
__A : Tuple = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
__A : Tuple = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
__A : List[str] = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
__A : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A : Dict = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
__A : str = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
__A : str = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
__A : int = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__A : int = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
__A : Optional[int] = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__A : Any = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__A : str = flax_model.params['encoder']['block'][str(a )]['layer']
__A : Dict = tax_attention_key
__A : List[str] = tax_attention_out
__A : List[str] = tax_attention_query
__A : Optional[Any] = tax_attention_value
__A : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A : str = tax_global_layer_norm
if split_mlp_wi:
__A : Union[str, Any] = tax_mlp_wi_a
__A : Dict = tax_mlp_wi_a
else:
__A : Optional[Any] = tax_mlp_wi
__A : List[Any] = tax_mlp_wo
__A : Union[str, Any] = tax_mlp_layer_norm
__A : int = flax_model_encoder_layer_block
# Only for layer 0:
__A : int = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
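    # transposed so the shared relative-position-bias table matches the Flax
    # embedding layout; T5 keeps a single table, assigned to block 0 only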
__A : Tuple = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__A : Tuple = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
__A : Tuple = tax_encoder_global_rel_embedding
# Assigning
__A : List[str] = tax_model['target']['encoder']['encoder_norm']['scale']
__A : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__A : List[str] = F"""layers_{str(a )}"""
# Self-Attention
__A : Union[str, Any] = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
__A : int = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
__A : Any = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
__A : int = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
__A : Union[str, Any] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
__A : Optional[Any] = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
__A : Dict = tax_enc_dec_attention_module['key']['kernel']
__A : Dict = tax_enc_dec_attention_module['out']['kernel']
__A : Optional[Any] = tax_enc_dec_attention_module['query']['kernel']
__A : Tuple = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
__A : List[Any] = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
__A : str = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
__A : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__A : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
__A : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__A : Union[str, Any] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
__A : Union[str, Any] = flax_model.params['decoder']['block'][str(a )]['layer']
__A : Any = tax_attention_key
__A : str = tax_attention_out
__A : Any = tax_attention_query
__A : List[Any] = tax_attention_value
__A : Tuple = tax_pre_attention_layer_norm
__A : List[Any] = tax_enc_dec_attention_key
__A : Optional[int] = tax_enc_dec_attention_out
__A : Union[str, Any] = tax_enc_dec_attention_query
__A : Union[str, Any] = tax_enc_dec_attention_value
__A : List[Any] = tax_cross_layer_norm
if split_mlp_wi:
__A : List[Any] = tax_mlp_wi_a
__A : Optional[int] = tax_mlp_wi_a
else:
__A : Union[str, Any] = tax_mlp_wi
__A : Optional[int] = tax_mlp_wo
        __A : int = tax_mlp_layer_norm
__A : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
__A : Tuple = tax_model['target']['decoder']['decoder_norm']['scale']
    __A : List[Any] = tax_decoder_norm
# Only for layer 0:
__A : Optional[Any] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
__A : Dict = tax_decoder_rel_embedding
# Token Embeddings
__A : str = tax_model['target']['token_embedder']['embedding']
    __A : Tuple = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__A : List[Any] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCAmelCase : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 702 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*_A , **_A )
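# A thin deprecation shim: behaves exactly like MobileViTImageProcessor but
# emits a FutureWarning when instantiated.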
| 77 | 0 |