import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)

"""Neville's iterated interpolation of a polynomial through given points."""


def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial with Neville's method.
    Returns the approximated value at x0 and the full computation tableau.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
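
# Illustrative check (added, not in the original module): the points below lie
# on the line y = x + 5, and Neville's scheme is exact on polynomials, so
# interpolating at x0 = 5 should return 10.0.
if __name__ == "__main__":
    value, _tableau = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # expected: 10.0
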
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-time SDE)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
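
# Illustrative usage sketch (added, not part of the original file). The
# checkpoint name below is an assumption -- any score-SDE (NCSN++) checkpoint
# whose UNet2DModel and ScoreSdeVeScheduler configs match should work:
#
#   from diffusers import UNet2DModel, ScoreSdeVeScheduler
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
#   scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
#   pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(batch_size=1, num_inference_steps=2000).images[0]
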
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
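
# Example invocation (added for illustration; the script filename and both
# paths are hypothetical):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.pt --dump_path ./vae-diffusers
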
"""Tests for Wav2Vec2ProcessorWithLM."""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))

| 13 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Dict =3
A__ : int =do_lower_case
A__ : str =remove_space
A__ : Optional[Any] =keep_accents
A__ : int =vocab_file
A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
A__ : Union[str, Any] =jieba
A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _UpperCAmelCase ( self : Union[str, Any] ):
return len(self.sp_model )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
A__ : Union[str, Any] =self.__dict__.copy()
A__ : Tuple =None
return state
def __setstate__( self : Tuple , UpperCamelCase__ : int ):
A__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ : Optional[int] ={}
A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
if self.remove_space:
A__ : Optional[int] =" ".join(inputs.strip().split() )
else:
A__ : Optional[Any] =inputs
A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
A__ : str =outputs.lower()
return outputs
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
A__ : List[str] =[]
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : Union[str, Any] =cur_pieces[1:]
else:
A__ : List[str] =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
return self.sp_model.PieceToId(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : str =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : Optional[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ : Tuple =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
A__ : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
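
# Illustrative usage sketch (added, not part of the original module; requires
# the `jieba` package and network access for the checkpoint download):
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("清华大学")  # jieba pre-segmentation + SentencePiece
#   print(tokenizer.decode(ids))
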
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
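
# Sanity-check sketch (added, not part of the original script; assumes the
# save above succeeded and reuses the names defined in it):
#
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
#   out = reloaded(**reloaded_tok(["Hello"], return_tensors="pt"))
#   print(out.logits.shape)
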
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_distilbert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_distilbert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_distilbert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_distilbert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_distilbert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["input_features", "attention_mask"]
def __init__( self , snake_case=8_0 , snake_case=1_6_0_0_0 , snake_case=8_0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> str:
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case)
_UpperCAmelCase : Optional[Any] =num_mel_bins
_UpperCAmelCase : Optional[int] =do_ceptral_normalize
_UpperCAmelCase : Optional[Any] =normalize_means
_UpperCAmelCase : Optional[Any] =normalize_vars
_UpperCAmelCase : Tuple =True
def lowerCAmelCase ( self , snake_case , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[Any] =waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_UpperCAmelCase : Dict =torch.from_numpy(snake_case).unsqueeze(0)
_UpperCAmelCase : Union[str, Any] =ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def lowerCAmelCase ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray:
'''simple docstring'''
# make sure we normalize float32 arrays
if normalize_means:
_UpperCAmelCase : int =x[:input_length].mean(axis=0)
_UpperCAmelCase : List[Any] =np.subtract(snake_case , snake_case)
if normalize_vars:
_UpperCAmelCase : List[Any] =x[:input_length].std(axis=0)
_UpperCAmelCase : Optional[int] =np.divide(snake_case , snake_case)
if input_length < x.shape[0]:
_UpperCAmelCase : Dict =padding_value
# make sure array is in float32
_UpperCAmelCase : str =x.astype(np.floataa)
return x
def lowerCAmelCase ( self , snake_case , snake_case = None) -> List[np.ndarray]:
'''simple docstring'''
_UpperCAmelCase : str =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(snake_case , snake_case)
]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
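# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of running the feature extractor above; the random
# signal is a stand-in for real 16 kHz mono audio.
if __name__ == "__main__":
    _extractor = Speech2TextFeatureExtractor()
    _dummy_waveform = np.random.randn(16_000).astype(np.float32)  # ~1 s of fake audio
    _batch = _extractor(_dummy_waveform, sampling_rate=16_000, padding=True, return_tensors="np")
    print(_batch["input_features"].shape)  # (1, num_frames, 80) with the default mel bins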
| 446 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
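    # --- Usage sketch (added; names are illustrative assumptions only) ---
    # The processor simply fans out to its tokenizer and image processor, e.g.:
    #   from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast
    #   processor = AltCLIPProcessor(
    #       image_processor=CLIPImageProcessor(),
    #       tokenizer=XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base"),
    #   )
    #   inputs = processor(text=["a photo of a cat"], return_tensors="pt")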
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 484 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """simple docstring"""
    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def test_karras_ve_pipeline(self):
        '''simple docstring'''
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 484 | 1 |
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        digits = 0
        fib_next = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, fib_next
        index += 1
        # count the digits of the newly produced Fibonacci number
        for _ in str(fib_next):
            digits += 1
        if digits == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
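# Sanity checks (added): the first 3-digit Fibonacci number is 144 = F(12), so
# solution(3) == 12; Project Euler problem 25 expects 4782 for the default
# n = 1000.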
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1, ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    '''simple docstring'''

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
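# --- Illustrative shape check (added; not part of the original module) ---
# FiLM applies a learned per-channel scale and shift computed from the
# conditioning embedding. Assuming d_model = 768:
#   film = T5FiLMLayer(in_features=768 * 4, out_features=768)
#   x = torch.randn(2, 10, 768)        # (batch, seq, d_model)
#   cond = torch.randn(2, 1, 768 * 4)  # conditioning embedding
#   out = film(x, cond)                # -> (2, 10, 768), broadcast over seq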
| 699 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': AlbertModel,
            'fill-mask': AlbertForMaskedLM,
            'question-answering': AlbertForQuestionAnswering,
            'text-classification': AlbertForSequenceClassification,
            'token-classification': AlbertForTokenClassification,
            'zero-shot': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
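# --- Usage sketch (added; illustrative only) ---
# Minimal masked-LM inference with the same checkpoint family as the
# integration test above:
#   from transformers import AlbertTokenizer, AlbertForMaskedLM
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   model = AlbertForMaskedLM.from_pretrained("albert-base-v2")
#   inputs = tokenizer("Paris is the capital of [MASK].", return_tensors="pt")
#   logits = model(**inputs).logits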
| 169 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """simple docstring"""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
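# --- Illustrative check (added) ---
# Nearest-neighbour scaling maps destination pixel (i, j) back to the source
# pixel (int(i * src_h / dst_h), int(j * src_w / dst_w)); upscaling a 2x2
# image to 4x4 therefore replicates each source pixel into a 2x2 block:
#   img = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)
#   nn = NearestNeighbour(img, 4, 4)
#   nn.process()  # nn.output now holds the 4x4x3 upscaled image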
| 169 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        '''simple docstring'''
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        '''simple docstring'''
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        '''simple docstring'''
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        '''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        '''simple docstring'''
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 616 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    """simple docstring"""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    """simple docstring"""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """simple docstring"""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}
    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
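    # --- Illustrative run (added; bypasses the interactive prompts) ---
    # For the 3-vertex graph below, bellman_ford relaxes every edge V-1 times:
    #   edges = [
    #       {"src": 0, "dst": 1, "weight": 4},
    #       {"src": 0, "dst": 2, "weight": 5},
    #       {"src": 1, "dst": 2, "weight": -2},
    #   ]
    #   bellman_ford(edges, 3, 3, 0)  # -> [0.0, 4.0, 2.0]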
| 616 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 194 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            '''simple docstring'''
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            '''simple docstring'''
            return list(s)

        def process_list(self, inp: List[str]):
            '''simple docstring'''
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ], )

    def _compute(self, predictions, references, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform, )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
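# Worked example (added): for reference "abc" vs. prediction "abd" there is
# one substitution, no deletions or insertions, and two hits, so
# CER = (1 + 0 + 0) / (1 + 0 + 2) = 1/3 ~= 0.333.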
| 194 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int, ) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
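    # --- Illustrative run (added) ---
    # A strictly diagonally dominant system with exact solution [1, -2, 1]:
    #   coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
    #   constant = np.array([[3], [-7], [1]])
    #   jacobi_iteration_method(coefficient, constant, [0, 0, 0], 25)
    # The iterates converge towards [1.0, -2.0, 1.0].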
| 283 |
def and_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    '''simple docstring'''
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 283 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a (__snake_case , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = VivitImageProcessor if is_vision_available() else None
def __snake_case ( self : List[Any] ) -> Optional[Any]:
__snake_case : List[Any] = VivitImageProcessingTester(self )
@property
def __snake_case ( self : int ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : List[Any] ) -> Optional[Any]:
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "do_center_crop" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def __snake_case ( self : Dict ) -> Tuple:
__snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __snake_case ( self : Any ) -> List[Any]:
__snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__snake_case : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__snake_case : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : str = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : List[str] ) -> List[Any]:
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__snake_case : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Any ) -> Optional[Any]:
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__snake_case : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
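

# Illustrative way to run this module's tests (command and path assumed, not
# taken from the file itself):
#   python -m pytest tests/models/vivit/test_image_processing_vivit.py -q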
| 716 |
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """Computes the Schur complement C - B.T @ A^{-1} @ B of the block matrix
    [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has 2 rows while B has 3, so schur_complement must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        # B has 2 columns while C has 3, so schur_complement must raise.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
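

def _schur_demo() -> None:
    # Added illustration (not part of the original tests): for the block matrix
    # M = [[A, B], [B.T, C]], det(M) == det(A) * det(schur_complement(A, B, C)).
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [0.0]])
    c = np.array([[2.0]])
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))
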
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 203 | 0 |
"""Returns the mode(s) of a list: the value(s) occurring most often."""
from typing import Any


def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
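
    # Added illustration:
    print(mode([2, 2, 3]))  # [2]
    print(mode([1, 1, 2, 2, 3]))  # [1, 2] -- all tied modes are returned, sorted
    print(mode([]))  # []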
| 211 |
"""Testing suite for the TensorFlow MobileBERT model."""
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model, which needs an extra label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 211 | 1 |
"""Lazy import structure for the Reformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
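
# Added note: with the _LazyModule installed above, a statement such as
#   from transformers.models.reformer import ReformerConfig
# only imports configuration_reformer when the attribute is first accessed,
# keeping the top-level `import transformers` cheap.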
| 717 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
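
# Added note: the JSON written to RESULTS_FILE_PATH maps benchmark names to
# durations in seconds, with a shape roughly like (numbers will vary):
#   {"num examples": 500000, "map identity": 12.3, ..., "filter": 4.5}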
| 129 | 0 |
"""Testing suite for the PyTorch BEiT model."""
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 634 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
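
# Added illustration (file names are placeholders, not from the original script):
#   python rouge_cli.py predicted_summaries.txt reference_summaries.txt --save_path rouge.json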
| 634 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = [0] * no_of_processes
A = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_snake_case ):
A = burst_time[i]
A = []
A = 0
A = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
A = []
A = -1
for i in range(_snake_case ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_snake_case )
if len(_snake_case ) > 0:
A = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
A = i
total_time += burst_time[target_process]
completed += 1
A = 0
A = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = [0] * no_of_processes
for i in range(_snake_case ):
A = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
_snake_case : Any = 4
_snake_case : Optional[Any] = [2, 5, 3, 7]
_snake_case : int = [0, 0, 0, 0]
_snake_case : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_snake_case : List[str] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 711 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = ProphetNetTokenizer
UpperCamelCase = False
def lowerCamelCase ( self :Any ):
super().setUp()
A = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase ( self :Any , __UpperCamelCase :List[str] ):
A = "UNwant\u00E9d,running"
A = "unwanted, running"
return input_text, output_text
def lowerCamelCase ( self :Optional[Any] ):
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__UpperCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase ( self :Optional[int] ):
A = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowerCamelCase ( self :List[Any] ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase ( self :Any ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowerCamelCase ( self :Tuple ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase ( self :Optional[int] ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCamelCase ( self :Dict ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase ( self :List[Any] ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase ( self :Optional[Any] ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCamelCase ( self :Dict ):
A = BasicTokenizer(do_lower_case=__UpperCamelCase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowerCamelCase ( self :List[Any] ):
A = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
A = {}
for i, token in enumerate(__UpperCamelCase ):
A = i
A = WordpieceTokenizer(vocab=__UpperCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
A = ["A long paragraph for summarization.", "Another paragraph for summarization."]
A = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
A = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors="pt" )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
A = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCamelCase ( self :Optional[Any] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowerCamelCase ( self :Any ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowerCamelCase ( self :List[Any] ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def lowerCamelCase ( self :Dict ):
A = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
A = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCamelCase )
A = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCamelCase )
A = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
A = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 524 | 0 |
"""Formatter that converts pyarrow data into JAX arrays."""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}

        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
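

# Added illustration (`ds` is an assumed datasets.Dataset, not defined here):
#   ds.set_format("jax")   # installs this JaxFormatter under the hood
#   ds[0]                  # numeric columns now come back as jax.numpy arrays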
| 11 |
"""Letter-frequency analysis: scores how closely a message's letter
frequencies match typical English text."""
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
SCREAMING_SNAKE_CASE = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
SCREAMING_SNAKE_CASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """simple docstring"""
    return x[0]


def get_frequency_order(message: str) -> str:
    """simple docstring"""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    """simple docstring"""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
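
    # Quick demonstration (illustrative input): print the inferred letter
    # frequency order of a sample sentence and its English match score (0-12).
    sample = "The quick brown fox jumps over the lazy dog"
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))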
| 94 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        '''simple docstring'''
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""")

    def construct_mapping(self, node, deep=False):
        '''simple docstring'''
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        '''simple docstring'''
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        '''simple docstring'''
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        '''simple docstring'''
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
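
    # Illustrative round-trip (hypothetical metadata): DatasetMetadata is a dict,
    # so a YAML block survives parse -> dump unchanged, e.g.
    # md = DatasetMetadata.from_yaml_string("language:\n- en\n")
    # assert md.to_yaml_string() == "language:\n- en\n"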
| 707 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    '''simple docstring'''
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
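
    # Illustrative checks: 121 reads the same in both directions, -121 does not
    # (the leading minus sign has no mirrored counterpart).
    print(is_palindrome(121))  # True
    print(is_palindrome(-121))  # False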
| 13 | 0 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 694 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 284 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    '''simple docstring'''
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    # note: the target key names below follow the HF YOLOS state-dict layout
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    '''simple docstring'''
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]])
        expected_slice_boxes = torch.tensor(
            [[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]])
        expected_slice_boxes = torch.tensor(
            [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]])
        expected_slice_boxes = torch.tensor(
            [[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]])
        expected_slice_boxes = torch.tensor(
            [[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]])
        expected_slice_boxes = torch.tensor(
            [[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
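
# Example invocation (hypothetical script/file names; only --yolos_name has a
# constrained set of values, listed in the help text above):
#   python convert_yolos.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small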
| 706 |
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
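
    # Sketch: Mersenne prime exponents below 20. The Lucas-Lehmer test is only
    # meaningful for prime p; composite p are included purely for illustration.
    print([p for p in range(2, 20) if lucas_lehmer_test(p)])  # expected: [2, 3, 5, 7, 13, 17, 19]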
| 563 | 0 |
"""simple docstring"""
def is_even(number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
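
    # Illustrative checks of the bitwise parity test above:
    print(is_even(4))  # True
    print(is_even(7))  # False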
| 213 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.0_2, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
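

if __name__ == "__main__":
    # Illustrative sketch (assumes `transformers` is installed): inspect the
    # block-sparse defaults and the ONNX input spec; `task` defaults to
    # "default", so `inputs` takes the non-multiple-choice branch.
    config = BigBirdConfig()
    print(config.attention_type, config.block_size, config.num_random_blocks)
    print(BigBirdOnnxConfig(config).inputs)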
| 213 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
| 486 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 486 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 307 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="").to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint,
            generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np")

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 307 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 ends its own chain
CHAINS[57] = False  # 58 belongs to the chain that ends with 89


def chain(number: int) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000) -> int:
    '''simple docstring'''
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 205 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            """ function.""")

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="""max""", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="""min""" if """loss""" in metric else """max""", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        '''simple docstring'''
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, """a+""") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""])
            generations_file.open("""w+""").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, """test""")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
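

# Illustrative wiring (hypothetical `model` and output path): these callbacks
# are meant to be handed to a pytorch_lightning Trainer alongside a seq2seq
# LightningModule exposing the `hparams.output_dir`, `metrics` and
# `metrics_save_path` attributes read above.
#
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback("output/", "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#     ]
# )
# trainer.fit(model)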
| 102 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16,
        resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.0_2,
        use_cache=True, **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 75 | 0 |
'''simple docstring'''
import requests
SCREAMING_SNAKE_CASE__ = "YOUR API KEY"
def lowerCamelCase ( _snake_case : str ,_snake_case : str = giphy_api_key ):
'''simple docstring'''
lowercase__ = "+".join(query.split() )
lowercase__ = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
lowercase__ = requests.get(_snake_case ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 720 |
'''simple docstring'''
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 539 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
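
    # Quick sketch: integrate y' = y from x = 0 with y(0) = 1 and step 0.1;
    # y(1) should land near e ~ 2.718 (Heun's method is second-order accurate).
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(ys[-1])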
| 406 |
import argparse

from omegaconf import OmegaConf

import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    '''simple docstring'''
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='scaled_linear', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
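
# Example invocation (hypothetical paths):
#   python convert_ldm.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline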
| 406 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f'''.{module_name}''', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
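

# Usage sketch (standard `transformers` API): the class below resolves and
# instantiates the right feature extractor for a checkpoint by name, e.g.
# feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")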
class AutoFeatureExtractor:
    '''simple docstring'''

    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class) | 704 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowercase__ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowercase__ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    '''simple docstring'''

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                } ), )

    def _compute(
        self, predictions: List[List[List[str]]], references: List[List[str]], min_len: int = 1, max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
} | 492 | 0 |
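# A minimal sanity-check sketch: the metric above delegates to NLTK's
# gleu_score.corpus_gleu, so the same kind of score can be reproduced
# directly. The token lists below are illustrative assumptions, not
# fixtures taken from the docstring.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]

score = gleu_score.corpus_gleu(
    list_of_references=list_of_references,
    hypotheses=hypotheses,
    min_len=1,
    max_len=4,
)
print(round(score, 2))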
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=4 , )-> Tuple:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def __magic_name__ ( self )-> str:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=A_ , )
return config, input_ids, attention_mask
def __magic_name__ ( self )-> int:
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self )-> Tuple:
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def __magic_name__ ( self )-> Union[str, Any]:
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('distilbert-base-uncased' )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self )-> str:
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(A_ , attention_mask=A_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , A_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) )
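# A minimal inference sketch, assuming Hub access: running the same
# checkpoint the slow tests above load, outside the test harness.
from transformers import DistilBertTokenizerFast, FlaxDistilBertModel

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, world!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)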
| 605 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
__UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'}
__UpperCAmelCase = '>>zh<<'
__UpperCAmelCase = 'Helsinki-NLP/'
if is_torch_available():
__UpperCAmelCase = 'pt'
elif is_tf_available():
__UpperCAmelCase = 'tf'
else:
__UpperCAmelCase = 'jax'
@require_sentencepiece
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = MarianTokenizer
snake_case_ = False
snake_case_ = True
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
UpperCAmelCase__ : int = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[int] = Path(self.tmpdirname )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(A ,save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(A ,save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
UpperCAmelCase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[Any] ,**A : List[Any] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname ,**A )
def __lowercase ( self : Union[str, Any] ,A : Tuple ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = """</s>"""
UpperCAmelCase__ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""</s>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(A ) ,9 )
def __lowercase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,9 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
UpperCAmelCase__ : List[str] = en_de_tokenizer(["""I am a small frog"""] ,return_tensors=A )
self.assertIsInstance(A ,A )
UpperCAmelCase__ : str = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(A ,batch.input_ids[0] )
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A )
UpperCAmelCase__ : Tuple = [x.name for x in Path(A ).glob("""*""" )]
self.assertIn("""source.spm""" ,A )
MarianTokenizer.from_pretrained(A )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] ,padding=A ,truncation=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch.input_ids.shape ,(2, 512) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] ,padding=A ,return_tensors=A )
self.assertIsInstance(A ,A )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
# fmt: off
UpperCAmelCase__ : Optional[int] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="""Helsinki-NLP/opus-mt-en-de""" ,revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" ,decode_kwargs={"""use_source_tokenizer""": True} ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
UpperCAmelCase__ : Any = """Tämä on testi"""
UpperCAmelCase__ : int = """This is a test"""
UpperCAmelCase__ : List[str] = [76, 7, 2_047, 2]
UpperCAmelCase__ : Optional[Any] = [69, 12, 11, 940, 2]
UpperCAmelCase__ : List[str] = tokenizer(A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : Optional[int] = tokenizer(text_target=A ).input_ids
self.assertListEqual(A ,A )
UpperCAmelCase__ : int = tokenizer.decode(A ,skip_special_tokens=A )
self.assertEqual(A ,A )
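# A minimal usage sketch, assuming the Helsinki-NLP checkpoint is
# reachable: round-tripping a sentence through the en-de tokenizer,
# mirroring the integration test above.
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tokenizer(["I am a small frog"], return_tensors="pt")
print(batch.input_ids[0].tolist())  # ends with the </s> id, 0
print(tokenizer.decode(batch.input_ids[0], skip_special_tokens=True))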
| 65 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[int]="attention" )-> int:
A_ = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
A_ = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def lowerCAmelCase ( snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[Any]=False )-> List[Any]:
if split_mlp_wi:
A_ = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
A_ = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
A_ = (wi_a, wi_a)
else:
A_ = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
A_ = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def lowerCAmelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[int] )-> int:
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def lowerCAmelCase ( snake_case__ : dict , *, snake_case__ : int , snake_case__ : bool )-> Union[str, Any]:
A_ = traverse_util.flatten_dict(variables["target"] )
A_ = {"/".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
A_ = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , snake_case__ )
A_ = collections.OrderedDict()
# Shared embeddings.
A_ = old["token_embedder/embedding"]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "encoder" , "attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 1 (MLP).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "encoder" , "pre_mlp_layer_norm" )
A_ , A_ = tax_mlp_lookup(snake_case__ , snake_case__ , "encoder" , snake_case__ )
A_ = layer_norm
if split_mlp_wi:
A_ = wi[0].T
A_ = wi[1].T
else:
A_ = wi.T
A_ = wo.T
A_ = old[
"encoder/relpos_bias/rel_embedding"
].T
A_ = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_self_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "self_attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 1 (Cross Attention).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_cross_attention_layer_norm" )
A_ , A_ , A_ , A_ = tax_attention_lookup(snake_case__ , snake_case__ , "decoder" , "encoder_decoder_attention" )
A_ = layer_norm
A_ = k.T
A_ = o.T
A_ = q.T
A_ = v.T
# Block i, layer 2 (MLP).
A_ = tax_layer_norm_lookup(snake_case__ , snake_case__ , "decoder" , "pre_mlp_layer_norm" )
A_ , A_ = tax_mlp_lookup(snake_case__ , snake_case__ , "decoder" , snake_case__ )
A_ = layer_norm
if split_mlp_wi:
A_ = wi[0].T
A_ = wi[1].T
else:
A_ = wi.T
A_ = wo.T
A_ = old["decoder/decoder_norm/scale"]
A_ = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A_ = old["decoder/logits_dense/kernel"].T
return new
def lowerCAmelCase ( snake_case__ : int , snake_case__ : bool )-> Any:
A_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
A_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
A_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
A_ = state_dict["shared.weight"]
return state_dict
def lowerCAmelCase ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int )-> List[Any]:
A_ = checkpoints.load_tax_checkpoint(snake_case__ )
A_ = convert_tax_to_pytorch(snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ )
A_ = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def lowerCAmelCase ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : bool = False )-> Optional[int]:
A_ = TaConfig.from_json_file(snake_case__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
A_ = TaEncoderModel(snake_case__ )
else:
A_ = TaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("Done" )
if __name__ == "__main__":
__magic_name__ : Optional[int] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__magic_name__ : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
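# A typical invocation of the converter above (all paths are
# placeholders; the script file name is assumed):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_encoder_only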
| 608 |
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase ( inductance : float , capacitance : float )-> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
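# A worked example for the function above: a 10 mH inductor with a
# 100 nF capacitor resonates near 5.03 kHz, since
# f0 = 1 / (2 * pi * sqrt(L * C)).
label, f0 = lowerCAmelCase(inductance=10e-3, capacitance=100e-9)
print(label, round(f0, 1), "Hz")  # Resonant frequency 5032.9 Hz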
| 608 | 1 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__A = logging.get_logger(__name__)
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: Optional[Any] = set()
__lowerCAmelCase: int = []
def parse_line(__SCREAMING_SNAKE_CASE ):
for line in fp:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[Any] = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(__SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase: Any = "\n".join(__SCREAMING_SNAKE_CASE )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(__SCREAMING_SNAKE_CASE )
buffer.clear()
continue
else:
__lowerCAmelCase: Any = line.strip()
buffer.append(__SCREAMING_SNAKE_CASE )
if from_gh:
for filename in os.listdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: List[str] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with open(__SCREAMING_SNAKE_CASE ) as fp:
parse_line(__SCREAMING_SNAKE_CASE )
else:
try:
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with z.open(__SCREAMING_SNAKE_CASE ) as fp:
parse_line(__SCREAMING_SNAKE_CASE )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase: str = set()
__lowerCAmelCase: Union[str, Any] = [os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for p in os.listdir(__SCREAMING_SNAKE_CASE ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
return selected_warnings
if __name__ == "__main__":
def a__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
return values.split("," )
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
__A = parser.parse_args()
__A = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__A = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__A = extract_warnings(args.output_dir, args.targets)
__A = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
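# A typical invocation (run id and token are placeholders; the script
# file name is assumed):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_artifacts \
#       --token "$GITHUB_TOKEN" \
#       --targets DeprecationWarning,UserWarning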
| 346 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__lowerCAmelCase: Optional[Any] = model_type_to_module_name(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__SCREAMING_SNAKE_CASE , "__name__" , __SCREAMING_SNAKE_CASE ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__lowerCAmelCase: Union[str, Any] = importlib.import_module("transformers" )
if hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return None
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) -> Optional[int]:
__lowerCAmelCase: Optional[Any] = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(__SCREAMING_SNAKE_CASE , encoding="utf-8" ) as reader:
return json.load(__SCREAMING_SNAKE_CASE )
class snake_case :
def __init__( self : List[Any])-> Dict:
'''simple docstring'''
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase__)
def lowercase_ ( cls : List[str] , UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int])-> int:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = kwargs.pop("config" , UpperCamelCase__)
__lowerCAmelCase: Any = kwargs.pop("trust_remote_code" , UpperCamelCase__)
__lowerCAmelCase: Dict = True
__lowerCAmelCase , __lowerCAmelCase: List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: str = config_dict.get("feature_extractor_type" , UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {}):
__lowerCAmelCase: Dict = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# It could be in `config.feature_extractor_type`
__lowerCAmelCase: List[Any] = getattr(UpperCamelCase__ , "feature_extractor_type" , UpperCamelCase__)
if hasattr(UpperCamelCase__ , "auto_map") and "AutoFeatureExtractor" in config.auto_map:
__lowerCAmelCase: Union[str, Any] = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
__lowerCAmelCase: List[str] = feature_extractor_class_from_name(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = feature_extractor_auto_map is not None
__lowerCAmelCase: Union[str, Any] = feature_extractor_class is not None or type(UpperCamelCase__) in FEATURE_EXTRACTOR_MAPPING
__lowerCAmelCase: Optional[Any] = resolve_trust_remote_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
if has_remote_code and trust_remote_code:
__lowerCAmelCase: List[str] = get_class_from_dynamic_module(
UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Any = kwargs.pop("code_revision" , UpperCamelCase__)
if os.path.isdir(UpperCamelCase__):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(UpperCamelCase__) in FEATURE_EXTRACTOR_MAPPING:
__lowerCAmelCase: Tuple = FEATURE_EXTRACTOR_MAPPING[type(UpperCamelCase__)]
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__)
raise ValueError(
f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
@staticmethod
def lowercase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int])-> Tuple:
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__)
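# A minimal resolution sketch, assuming Hub access: the mapping above
# routes a checkpoint name to its concrete feature extractor class.
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor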
| 346 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
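# A minimal sketch of what the _LazyModule indirection above buys:
# importing the config pulls in neither torch nor sentencepiece, since
# submodules load only when their objects are first accessed.
from transformers import FNetConfig

config = FNetConfig()  # library defaults
print(config.model_type)  # fnet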
| 715 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[Any] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = '''visual_bert'''
def __init__( self : int , __lowerCAmelCase : Optional[Any]=3_05_22 , __lowerCAmelCase : Dict=7_68 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : Optional[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Optional[int]=5_12 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : List[Any]=1e-12 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=True , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : Optional[Any]=2 , **__lowerCAmelCase : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = visual_embedding_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = type_vocab_size
A__ = layer_norm_eps
A__ = bypass_transformer
A__ = special_visual_initialize
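# A minimal sketch: instantiating the configuration above with its
# library defaults.
from transformers import VisualBertConfig

config = VisualBertConfig()
print(config.model_type, config.hidden_size)  # visual_bert 768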
| 247 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : str = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : List[str] = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
UpperCamelCase : List[str] = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ElectraTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
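# A minimal usage sketch, assuming Hub access: encoding a sentence pair
# with the fast tokenizer above. The 0/1 token_type_ids mirror the
# segment logic in the second method of the class.
from transformers import ElectraTokenizerFast

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
encoded = tokenizer("electric sheep", "android dreams")
print(encoded.token_type_ids)  # 0s for the first segment, 1s for the second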
| 50 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __lowerCAmelCase : Any ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def A__ ( __lowerCAmelCase : str ):
# word like '180' or '身高' or '神'
for char in word:
lowerCamelCase__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = set()
for token in tokens:
lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowerCamelCase__ = list(__lowerCAmelCase )
return word_list
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowerCamelCase__ = bert_tokens
lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
while start < end:
lowerCamelCase__ = True
if is_chinese(bert_word[start] ):
lowerCamelCase__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowerCamelCase__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase__ = """##""" + bert_word[j]
lowerCamelCase__ = start + i
lowerCamelCase__ = False
break
if single_word:
start += 1
return bert_word
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Optional[int] ):
# For Chinese (Ro)BERT, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
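# A typical invocation, using the script's own argparse defaults as
# placeholders (an LTP resource and a Chinese BERT checkpoint must be
# available locally; the script file name is assumed):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt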
| 50 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str=10_24 ) -> List[str]:
snake_case , snake_case = [], []
snake_case = list(zip(__lowerCAmelCase , __lowerCAmelCase ) )
snake_case , snake_case = sorted_examples[0]
def is_too_big(__lowerCAmelCase : Tuple ):
return tok(__lowerCAmelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
snake_case = new_src + """ """ + src
snake_case = new_tgt + """ """ + tgt
if is_too_big(__lowerCAmelCase ) or is_too_big(__lowerCAmelCase ): # cant fit, finalize example
finished_src.append(__lowerCAmelCase )
finished_tgt.append(__lowerCAmelCase )
snake_case , snake_case = src, tgt
else: # can fit, keep adding
snake_case , snake_case = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__lowerCAmelCase )
finished_tgt.append(__lowerCAmelCase )
return finished_src, finished_tgt
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Path , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> List[str]:
snake_case = Path(__lowerCAmelCase )
save_path.mkdir(exist_ok=__lowerCAmelCase )
for split in ["train"]:
snake_case , snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
snake_case = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()]
snake_case = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()]
snake_case , snake_case = pack_examples(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
print(F'''packed {split} split from {len(__lowerCAmelCase )} examples -> {len(__lowerCAmelCase )}.''' )
Path(save_path / F'''{split}.source''' ).open("""w""" ).write("""\n""".join(__lowerCAmelCase ) )
Path(save_path / F'''{split}.target''' ).open("""w""" ).write("""\n""".join(__lowerCAmelCase ) )
for split in ["val", "test"]:
snake_case , snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(__lowerCAmelCase , save_path / F'''{split}.source''' )
shutil.copyfile(__lowerCAmelCase , save_path / F'''{split}.target''' )
def __lowerCamelCase ( ) -> Union[str, Any]:
snake_case = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=__lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=__lowerCAmelCase , default=1_28 )
parser.add_argument("""--data_dir""" , type=__lowerCAmelCase )
parser.add_argument("""--save_path""" , type=__lowerCAmelCase )
snake_case = parser.parse_args()
snake_case = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__lowerCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
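# A typical invocation (paths are placeholders; the script file name is
# assumed):
#
#   python pack_dataset.py \
#       --tok_name facebook/bart-large-cnn \
#       --max_seq_len 128 \
#       --data_dir ./cnn_dm \
#       --save_path ./cnn_dm_packed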
| 713 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
_SCREAMING_SNAKE_CASE = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = direct_transformers_import(PATH_TO_TRANSFORMERS)
_SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_SCREAMING_SNAKE_CASE = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ) -> List[str]:
snake_case = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
snake_case = True
# Deal with multi-line cases
elif (
re.search(
rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __lowerCAmelCase , )
is not None
):
snake_case = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
snake_case = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
snake_case = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
snake_case = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
snake_case = True
if not attribute_used:
snake_case = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
snake_case = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
snake_case = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
snake_case = True
elif attribute.endswith("""_token_id""" ):
snake_case = True
# configuration class specific cases
if not case_allowed:
snake_case = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
snake_case = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
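# Optional sanity check, not part of the original script: a tiny self-contained demo of the
# multi-line `getattr` regex used in `check_attribute_being_used` above. "hidden_size" is an
# arbitrary attribute name chosen for the demo; `re` is already used (and imported) by this script.
def _demo_multiline_getattr_regex():
    src = 'getattr(\n    self.config, "hidden_size", None\n)'
    pat = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    # the whitespace class includes newlines, so the wrapped call still matches
    assert re.search(pat, src) is not None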
| 517 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = 'maskformer'
__lowerCamelCase : int = {'hidden_size': 'mask_feature_size'}
__lowerCamelCase : List[str] = ['resnet', 'swin']
__lowerCamelCase : Dict = ['detr']
def __init__(self , A = 256 , A = 256 , A = 0.1 , A = False , A = None , A = None , A = 0.02 , A = 1.0 , A = 1.0 , A = 1.0 , A = 20.0 , A = None , **A , ) -> List[Any]:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(A , A ):
_a = backbone_config.pop('''model_type''' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_a = DetrConfig()
else:
# verify that the decoder is supported
_a = (
decoder_config.pop('''model_type''' ) if isinstance(A , A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(A , A ):
_a = CONFIG_MAPPING[decoder_type]
_a = config_class.from_dict(A )
_a = backbone_config
_a = decoder_config
# main feature dimension for the model
_a = fpn_feature_size
_a = mask_feature_size
# initializer
_a = init_std
_a = init_xavier_std
# Hungarian matcher && loss
_a = cross_entropy_weight
_a = dice_weight
_a = mask_weight
_a = use_auxiliary_loss
_a = no_object_weight
_a = output_auxiliary_logits
_a = self.decoder_config.encoder_attention_heads
_a = self.decoder_config.num_hidden_layers
super().__init__(**A )
@classmethod
def a__ (cls , A , A , **A ) -> Optional[Any]:
"""simple docstring"""
return cls(
backbone_config=A , decoder_config=A , **A , )
def a__ (self ) -> Dict[str, any]:
"""simple docstring"""
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.decoder_config.to_dict()
_a = self.__class__.model_type
return output
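# Usage sketch (hedged: in the upstream library this class is `MaskFormerConfig` and the
# classmethod above is `from_backbone_and_decoder_configs`; the names here are illustrative):
#
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   decoder = DetrConfig()
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
#   config.to_dict()["backbone_config"]["model_type"]  # -> "swin"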
| 11 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
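# Worked trace (matches the asserts above): strand_sort([4, 3, 5, 1, 2])
#   pass 1 pulls the increasing strand [4, 5] -> solution [4, 5],    remaining [3, 1, 2]
#   pass 2 pulls the strand [3]               -> solution [3, 4, 5], remaining [1, 2]
#   pass 3 pulls the strand [1, 2]            -> solution [1, 2, 3, 4, 5]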
| 273 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
a : List[Any] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
a : Optional[int] = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
a : Optional[int] = BeautifulSoup(res.text, 'html.parser')
a : Tuple = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""")
| 721 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
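    # Example invocation (script name and paths are placeholders):
    #   python <this_script>.py /path/to/fairseq/model.pt ./xglm-hf-dump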
| 529 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase ):
@register_to_config
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None )-> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCAmelCase__ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowerCAmelCase__ = torch.zeros(__UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = torch.nn.Parameter(__UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
a_ =42
a_ =42
a_ =42
a_ =42
a_ =42
a_ =42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )-> List[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1
# get prompt text embeddings
lowerCAmelCase__ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowerCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowerCAmelCase__ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
lowerCAmelCase__ = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowerCAmelCase__ = self.learned_classifier_free_sampling_embeddings.embeddings
lowerCAmelCase__ = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
else:
lowerCAmelCase__ = [""] * batch_size
lowerCAmelCase__ = text_input_ids.shape[-1]
lowerCAmelCase__ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
lowerCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowerCAmelCase__ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ = negative_prompt_embeds.shape[1]
lowerCAmelCase__ = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
lowerCAmelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = 100 , __UpperCAmelCase = 5.0 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , )-> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = len(__UpperCAmelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}" )
lowerCAmelCase__ = batch_size * num_images_per_prompt
lowerCAmelCase__ = guidance_scale > 1.0
lowerCAmelCase__ = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__UpperCAmelCase )}." )
# get the initial completely masked latents unless the user supplied it
lowerCAmelCase__ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowerCAmelCase__ = self.transformer.num_vector_embeds - 1
lowerCAmelCase__ = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F" {self.transformer.num_vector_embeds - 1} (inclusive)." )
lowerCAmelCase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )
lowerCAmelCase__ = self.scheduler.timesteps.to(self.device )
lowerCAmelCase__ = latents
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
lowerCAmelCase__ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowerCAmelCase__ = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ = model_output.chunk(2 )
lowerCAmelCase__ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )
lowerCAmelCase__ = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
# remove `log(0)`'s (`-inf`s)
lowerCAmelCase__ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self.vqvae.config.vq_embed_dim
lowerCAmelCase__ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowerCAmelCase__ = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
lowerCAmelCase__ = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample
lowerCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> torch.FloatTensor:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
lowerCAmelCase__ = torch.exp(__UpperCAmelCase )
lowerCAmelCase__ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowerCAmelCase__ = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
lowerCAmelCase__ = torch.cat((all_true, keep_mask) , dim=1 )
lowerCAmelCase__ = keep_mask[:, :-1, :]
lowerCAmelCase__ = keep_mask.gather(1 , indices.argsort(1 ) )
lowerCAmelCase__ = log_p_x_0.clone()
lowerCAmelCase__ = -torch.inf # -inf = log(0)
return rv
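# Worked example of the truncation step above (toy numbers, truncation_rate = 0.7):
# with per-class probabilities [0.6, 0.3, 0.1] for one pixel, the cumulative sums are
# [0.6, 0.9, 1.0], so `cumsum < 0.7` keeps only the top class; prepending `all_true`
# and dropping the last row shifts the mask, so the top two classes survive and only
# the 0.1 class is zeroed out to log(0).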
| 339 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user-profile dict embedded in an Instagram page's script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the user information as a dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"{instagram_user.number_of_posts = }")
print(F"{instagram_user.number_of_followers = }")
print(F"{instagram_user.number_of_followings = }")
print(F"{instagram_user.email = }")
print(F"{instagram_user.website = }")
print(F"{instagram_user.profile_picture_url = }")
print(F"{instagram_user.is_verified = }")
print(F"{instagram_user.is_private = }")
| 339 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 73 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
__UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
__UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
__UpperCAmelCase : int = '''default_config.yaml'''
__UpperCAmelCase : Tuple = config_folder / config_file
__UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
__UpperCAmelCase : int = Path('''tests/test_configs''' )
@classmethod
def _UpperCamelCase ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _UpperCamelCase ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=a_ ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
def _UpperCamelCase ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''test-tpu'''
__UpperCAmelCase : Tuple = '''us-central1-a'''
__UpperCAmelCase : Tuple = '''ls'''
__UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
__UpperCAmelCase : Dict = '''cd /usr/share'''
__UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
__UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
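    # Worked example for a tree of height 2 (4 leaves): minimax(0, 0, True, [3, 5, 2, 9], 2)
    #   minimizers at depth 1: min(3, 5) = 3 and min(2, 9) = 2
    #   maximizer at the root: max(3, 2) = 3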
| 166 | def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Luhn algorithm validation for a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number, printing the reason on failure."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 166 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCamelCase__ = "\nHuman: <<task>>\n\nAssistant: "
UpperCamelCase__ = "huggingface-tools/default-prompts"
UpperCamelCase__ = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__="run" ) -> int:
if prompt_or_repo_id is None:
__lowercase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" , lowercase__ ) is not None:
return prompt_or_repo_id
__lowercase = cached_file(
lowercase__ , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
return f.read()
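# Illustrative usage (the agent name is a placeholder):
#   template = download_prompt(None, agent_name="my-agent", mode="chat")
#   -> fetches "chat_prompt_template.txt" from the default prompts dataset repo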
| 634 |
def hamming_distance(string1: str, string2: str) -> int:
    """Calculate the Hamming distance between two equal-length strings."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
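    # Example beyond the doctests (positions r/t, o/h, l/r differ):
    assert hamming_distance("karolin", "kathrin") == 3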
| 634 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase__: int = logging.getLogger(__name__)
UpperCamelCase__: Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
UpperCamelCase__: Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
lowerCamelCase__ = field(
default=A__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(A__ )} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """The input training data file (a text file)."""} )
lowerCamelCase__ = field(
default=A__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
lowerCamelCase__ = field(default=A__ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
lowerCamelCase__ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowerCamelCase__ = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
lowerCamelCase__ = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
lowerCamelCase__ = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
lowerCamelCase__ = field(
default=A__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ( _lowerCAmelCase : DataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , ) -> Optional[Any]:
def _dataset(_lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_lowerCAmelCase , file_path=_lowerCAmelCase , block_size=args.block_size , ref_path=_lowerCAmelCase , )
return LineByLineTextDataset(tokenizer=_lowerCAmelCase , file_path=_lowerCAmelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_lowerCAmelCase , file_path=_lowerCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_lowerCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_lowerCAmelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case_ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase : List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
UpperCAmelCase : int = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase : Optional[Any] = AutoModelWithLMHead.from_config(_lowerCAmelCase )
model.resize_token_embeddings(len(_lowerCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
UpperCAmelCase : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase : List[str] = (
get_dataset(_lowerCAmelCase , tokenizer=_lowerCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase : int = (
get_dataset(_lowerCAmelCase , tokenizer=_lowerCAmelCase , evaluate=_lowerCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase : List[str] = DataCollatorForPermutationLanguageModeling(
tokenizer=_lowerCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase : Tuple = DataCollatorForWholeWordMask(
tokenizer=_lowerCAmelCase , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=_lowerCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase : Any = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_collator=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , prediction_loss_only=_lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase : Union[str, Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_lowerCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase : Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase : Any = trainer.evaluate()
UpperCAmelCase : Tuple = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase : Optional[Any] = {'''perplexity''': perplexity}
UpperCAmelCase : Optional[int] = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _lowerCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_lowerCAmelCase )
return results
def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
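# Typical invocation of this script (file and directory names are placeholders):
#   python run_language_modeling.py --model_name_or_path gpt2 --do_train \
#       --train_data_file train.txt --output_dir ./lm-out --overwrite_output_dir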
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
UpperCamelCase__: str = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """lxmert"""
lowerCamelCase__ = {}
def __init__( self : Optional[Any] , __snake_case : Dict=30522 , __snake_case : Dict=768 , __snake_case : Any=12 , __snake_case : List[str]=9500 , __snake_case : List[str]=1600 , __snake_case : List[Any]=400 , __snake_case : Optional[Any]=3072 , __snake_case : int="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : int=0.1 , __snake_case : int=512 , __snake_case : Union[str, Any]=2 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=1E-12 , __snake_case : Dict=9 , __snake_case : str=5 , __snake_case : Optional[int]=5 , __snake_case : int=2048 , __snake_case : Any=4 , __snake_case : Dict=6.67 , __snake_case : Tuple=True , __snake_case : Tuple=True , __snake_case : Dict=True , __snake_case : Optional[int]=True , __snake_case : str=True , __snake_case : Any=True , __snake_case : str=True , **__snake_case : Any , ) -> Any:
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Dict = num_qa_labels
UpperCAmelCase : Optional[Any] = num_object_labels
UpperCAmelCase : int = num_attr_labels
UpperCAmelCase : str = l_layers
UpperCAmelCase : Optional[Any] = x_layers
UpperCAmelCase : str = r_layers
UpperCAmelCase : Any = visual_feat_dim
UpperCAmelCase : int = visual_pos_dim
UpperCAmelCase : Tuple = visual_loss_normalizer
UpperCAmelCase : Tuple = task_matched
UpperCAmelCase : Union[str, Any] = task_mask_lm
UpperCAmelCase : Optional[Any] = task_obj_predict
UpperCAmelCase : Tuple = task_qa
UpperCAmelCase : Dict = visual_obj_loss
UpperCAmelCase : List[Any] = visual_attr_loss
UpperCAmelCase : Dict = visual_feat_loss
UpperCAmelCase : str = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__snake_case )
| 127 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
snake_case__ = "altclip_text_model"
def __init__( self : str , __SCREAMING_SNAKE_CASE : str=25_0002 , __SCREAMING_SNAKE_CASE : Any=1024 , __SCREAMING_SNAKE_CASE : List[str]=24 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : List[str]=4096 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=514 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : int=1e-05 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
a_ : Tuple = vocab_size
a_ : List[str] = hidden_size
a_ : List[str] = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : Optional[Any] = hidden_act
a_ : List[Any] = intermediate_size
a_ : Optional[int] = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : str = max_position_embeddings
a_ : List[str] = type_vocab_size
a_ : Optional[Any] = initializer_range
a_ : List[Any] = initializer_factor
a_ : Optional[Any] = layer_norm_eps
a_ : Optional[Any] = position_embedding_type
a_ : Dict = use_cache
a_ : Union[str, Any] = project_dim
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
snake_case__ = "altclip_vision_model"
def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : List[Any]=3072 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : str=224 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : List[Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : Dict=1e-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1.0 , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> str:
super().__init__(**_lowerCamelCase )
a_ : List[Any] = hidden_size
a_ : Optional[Any] = intermediate_size
a_ : Any = projection_dim
a_ : List[str] = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : Dict = num_channels
a_ : Optional[Any] = patch_size
a_ : Tuple = image_size
a_ : Any = initializer_range
a_ : Union[str, Any] = initializer_factor
a_ : int = attention_dropout
a_ : int = layer_norm_eps
a_ : str = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
a_ , a_ : Tuple = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
a_ : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
snake_case__ = "altclip"
snake_case__ = True
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : int=2.6592 , **__SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
a_ : List[str] = kwargs.pop('''text_config_dict''' , _lowerCamelCase )
a_ : Tuple = kwargs.pop('''vision_config_dict''' , _lowerCamelCase )
super().__init__(**_lowerCamelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
a_ : str = {}
# This is the complete result when using `text_config_dict`.
a_ : List[str] = AltCLIPTextConfig(**_lowerCamelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
a_ : Any = (
f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
f'The value `text_config_dict[\"{key}\"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
a_ : List[str] = (
f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
                            f'value `text_config[\"{key}\"]` will be overridden.'
)
logger.warning(_lowerCamelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
a_ : str = {}
# This is the complete result when using `vision_config_dict`.
a_ : Optional[int] = AltCLIPVisionConfig(**_lowerCamelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
a_ : Any = {
str(_lowerCamelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
a_ : str = (
f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
f'values. The value `vision_config_dict[\"{key}\"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
a_ : str = (
f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
                            f'The value `vision_config[\"{key}\"]` will be overridden.'
)
logger.warning(_lowerCamelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
a_ : Any = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
a_ : List[Any] = {}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
a_ : List[Any] = AltCLIPTextConfig(**_lowerCamelCase )
a_ : int = AltCLIPVisionConfig(**_lowerCamelCase )
a_ : List[Any] = projection_dim
a_ : int = logit_scale_init_value
a_ : int = 1.0
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
a_ : List[Any] = copy.deepcopy(self.__dict__ )
a_ : Tuple = self.text_config.to_dict()
a_ : List[str] = self.vision_config.to_dict()
a_ : Any = self.__class__.model_type
return output
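# Usage sketch (hedged: upstream these classes are AltCLIPTextConfig, AltCLIPVisionConfig and
# AltCLIPConfig; the obfuscated names above map onto them):
#   cfg = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
#   cfg.projection_dim  # -> 768 by default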
| 712 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
snake_case : int = threading.Lock()
snake_case : Optional[logging.Handler] = None
snake_case : str = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
snake_case : int = logging.WARNING
snake_case : Optional[int] = True
def __lowercase ( ):
a__ = os.getenv('TRANSFORMERS_VERBOSITY' , __lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def __lowercase ( ):
return __name__.split('.' )[0]
def __lowercase ( ):
return logging.getLogger(_get_library_name() )
def __lowercase ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
a__ = logging.StreamHandler() # Set sys.stderr as stream.
a__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
a__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
a__ = False
def __lowercase ( ):
global _default_handler
with _lock:
if not _default_handler:
return
a__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
a__ = None
def __lowercase ( ):
return log_levels
def __lowercase ( __lowerCAmelCase : Optional[str] = None ):
if name is None:
a__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__lowerCAmelCase )
def __lowercase ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __lowercase ( __lowerCAmelCase : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(__lowerCAmelCase )
def __lowercase ( ):  # mirrors transformers' set_verbosity_info
    return set_verbosity(INFO )
def __lowercase ( ):  # mirrors set_verbosity_warning
    return set_verbosity(WARNING )
def __lowercase ( ):  # mirrors set_verbosity_debug
    return set_verbosity(DEBUG )
def __lowercase ( ):  # mirrors set_verbosity_error
    return set_verbosity(ERROR )
def __lowercase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowercase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __lowercase ( __lowerCAmelCase : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__lowerCAmelCase )
def __lowercase ( ):
_configure_library_root_logger()
a__ = False
def __lowercase ( ):
_configure_library_root_logger()
a__ = True
def __lowercase ( ):
a__ = _get_library_root_logger().handlers
for handler in handlers:
a__ = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(__lowerCAmelCase )
def __lowercase ( ):
a__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None )  # reset each handler to the default format
def __lowercase ( self : List[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ):
a__ = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
if no_advisory_warnings:
return
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
snake_case : List[Any] = warning_advice
@functools.lru_cache(None )
def __lowercase ( self : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[int] ):
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
snake_case : str = warning_once
class snake_case_ :
def __init__( self :Union[str, Any] ,*__snake_case :Tuple ,**__snake_case :Any ) -> Optional[Any]: # pylint: disable=unused-argument
a__ = args[0] if args else None
def __iter__( self :int ) -> List[Any]:
return iter(self._iterator )
def __getattr__( self :Union[str, Any] ,__snake_case :int ) -> Dict:
def empty_fn(*__snake_case :int ,**__snake_case :Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self :str ) -> int:
return self
def __exit__( self :List[Any] ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :List[str] ) -> Optional[int]:
return
class snake_case_ :
def __call__( self :Tuple ,*__snake_case :Dict ,**__snake_case :Union[str, Any] ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*__snake_case ,**__snake_case )
else:
return EmptyTqdm(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Optional[Any] ,*__snake_case :Tuple ,**__snake_case :List[Any] ) -> Dict:
a__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :int ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
snake_case : int = _tqdm_cls()
def __lowercase ( ):
global _tqdm_active
return bool(_tqdm_active )
def __lowercase ( ):
global _tqdm_active
a__ = True
hf_hub_utils.enable_progress_bars()
def __lowercase ( ):
global _tqdm_active
a__ = False
hf_hub_utils.disable_progress_bars()
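# A minimal usage sketch, assuming this module mirrors transformers' logging
# utilities (the obfuscation above collapses their public names into
# `__lowercase`); the released API exposes the same helpers:
if __name__ == "__main__":
    from transformers.utils import logging as hf_logging

    hf_logging.set_verbosity_info()            # default library level is WARNING
    logger = hf_logging.get_logger(__name__)   # child of the library root logger
    logger.info("verbosity raised; tqdm bars toggled via enable/disable_progress_bar")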
| 335 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
snake_case : int = logging.get_logger(__name__)
snake_case : List[Any] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = '''layoutlmv3'''
def __init__( self :Optional[Any] ,__snake_case :Dict=5_02_65 ,__snake_case :Union[str, Any]=7_68 ,__snake_case :Dict=12 ,__snake_case :List[str]=12 ,__snake_case :Any=30_72 ,__snake_case :int="gelu" ,__snake_case :List[str]=0.1 ,__snake_case :Optional[Any]=0.1 ,__snake_case :List[Any]=5_12 ,__snake_case :Any=2 ,__snake_case :Dict=0.02 ,__snake_case :Dict=1E-5 ,__snake_case :Tuple=1 ,__snake_case :Optional[int]=0 ,__snake_case :List[Any]=2 ,__snake_case :Optional[Any]=10_24 ,__snake_case :List[str]=1_28 ,__snake_case :List[str]=1_28 ,__snake_case :str=True ,__snake_case :Any=32 ,__snake_case :Union[str, Any]=1_28 ,__snake_case :Optional[Any]=64 ,__snake_case :List[Any]=2_56 ,__snake_case :Any=True ,__snake_case :Optional[int]=True ,__snake_case :List[str]=True ,__snake_case :Any=2_24 ,__snake_case :Union[str, Any]=3 ,__snake_case :int=16 ,__snake_case :Any=None ,**__snake_case :Dict ,) -> Any:
super().__init__(
vocab_size=__snake_case ,hidden_size=__snake_case ,num_hidden_layers=__snake_case ,num_attention_heads=__snake_case ,intermediate_size=__snake_case ,hidden_act=__snake_case ,hidden_dropout_prob=__snake_case ,attention_probs_dropout_prob=__snake_case ,max_position_embeddings=__snake_case ,type_vocab_size=__snake_case ,initializer_range=__snake_case ,layer_norm_eps=__snake_case ,pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case ,)
a__ = max_ad_position_embeddings
a__ = coordinate_size
a__ = shape_size
a__ = has_relative_attention_bias
a__ = rel_pos_bins
a__ = max_rel_pos
a__ = has_spatial_attention_bias
a__ = rel_ad_pos_bins
a__ = max_rel_ad_pos
a__ = text_embed
a__ = visual_embed
a__ = input_size
a__ = num_channels
a__ = patch_size
a__ = classifier_dropout
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = version.parse('''1.12''' )
@property
def lowerCamelCase__( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def lowerCamelCase__( self :Tuple ) -> float:
return 1E-5
@property
def lowerCamelCase__( self :Any ) -> int:
return 12
def lowerCamelCase__( self :Tuple ,__snake_case :"ProcessorMixin" ,__snake_case :int = -1 ,__snake_case :int = -1 ,__snake_case :bool = False ,__snake_case :Optional["TensorType"] = None ,__snake_case :int = 3 ,__snake_case :int = 40 ,__snake_case :int = 40 ,) -> Mapping[str, Any]:
setattr(processor.image_processor ,'apply_ocr' ,__snake_case )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a__ = compute_effective_axis_dimension(
__snake_case ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a__ = processor.tokenizer.num_special_tokens_to_add(__snake_case )
a__ = compute_effective_axis_dimension(
__snake_case ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
a__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
a__ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
a__ = self._generate_dummy_images(__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = dict(
processor(
__snake_case ,text=__snake_case ,boxes=__snake_case ,return_tensors=__snake_case ,) )
return inputs
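# A minimal usage sketch with the released classes this snippet mirrors
# (LayoutLMv3Config and its ONNX config in transformers); values are defaults:
if __name__ == "__main__":
    from transformers import LayoutLMv3Config
    from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig

    config = LayoutLMv3Config()  # hidden_size=768, num_hidden_layers=12, ...
    onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
    print(onnx_config.inputs)  # ordered dynamic-axis spec from the property above
    print(onnx_config.atol_for_validation, onnx_config.default_onnx_opset)  # 1e-05 12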
| 335 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
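# A hedged usage sketch: upstream this class ships as the community
# "speech_to_image_diffusion" pipeline for diffusers; the model ids are
# illustrative and the audio array is a silent placeholder.
if __name__ == "__main__":
    import numpy as np
    from diffusers import DiffusionPipeline
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
        speech_model=speech_model,
        speech_processor=speech_processor,
    )
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
    image = pipe(audio, sampling_rate=16_000).images[0]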
| 676 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
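# A quick numeric check of the two functions above, assuming distinct names
# (the obfuscation names both `A`); the second is the sigmoid-weighted linear
# unit, i.e. swish / SiLU:
if __name__ == "__main__":
    x = np.array([-1.0, 0.0, 1.0])
    s = 1 / (1 + np.exp(-x))  # sigmoid
    print(s)                  # [0.26894142 0.5        0.73105858]
    print(x * s)              # swish: [-0.26894142  0.          0.73105858]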
| 676 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , **__lowerCamelCase : str ):
"""simple docstring"""
_snake_case = feature_size
_snake_case = sampling_rate
_snake_case = padding_value
_snake_case = kwargs.pop('''padding_side''' , '''right''' )
_snake_case = kwargs.pop('''return_attention_mask''' , __lowerCamelCase )
super().__init__(**__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , ):
"""simple docstring"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
_snake_case = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f""" to this method that includes {self.model_input_names[0]}, but you provided"""
f""" {list(processed_features.keys() )}""" )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__lowerCamelCase ) == 0:
if return_attention_mask:
_snake_case = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_snake_case = required_input[0]
if isinstance(__lowerCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_snake_case = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__lowerCamelCase ):
_snake_case = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__lowerCamelCase ):
_snake_case = '''tf'''
elif is_torch_tensor(__lowerCamelCase ):
_snake_case = '''pt'''
elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
_snake_case = '''np'''
else:
raise ValueError(
f"""type of {first_element} unknown: {type(__lowerCamelCase )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
_snake_case = to_numpy(__lowerCamelCase )
else:
_snake_case = [to_numpy(__lowerCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
_snake_case = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase )
_snake_case = processed_features[self.model_input_names[0]]
_snake_case = len(__lowerCamelCase )
if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
_snake_case = []
for i in range(__lowerCamelCase ):
_snake_case = {k: v[i] for k, v in processed_features.items()}
# truncation
_snake_case = self._truncate(
__lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , )
truncated_inputs.append(__lowerCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_snake_case = PaddingStrategy.MAX_LENGTH
_snake_case = {}
for i in range(__lowerCamelCase ):
# padding
_snake_case = self._pad(
truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
_snake_case = []
if value.dtype is np.dtype(np.float64 ):
    _snake_case = value.astype(np.float32 )  # downcast doubles before batching
batch_outputs[key].append(__lowerCamelCase )
return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
"""simple docstring"""
_snake_case = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_snake_case = len(__lowerCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_snake_case = np.ones(len(__lowerCamelCase ) , dtype=np.int32 )
if needs_to_be_padded:
_snake_case = max_length - len(__lowerCamelCase )
if self.padding_side == "right":
if return_attention_mask:
_snake_case = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
_snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_snake_case = np.pad(
__lowerCamelCase , __lowerCamelCase , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_snake_case = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
_snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_snake_case = np.pad(
__lowerCamelCase , __lowerCamelCase , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
_snake_case = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_snake_case = len(__lowerCamelCase ) > max_length
if needs_to_be_truncated:
_snake_case = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_snake_case = processed_features['''attention_mask'''][:max_length]
return processed_features
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str=False , __lowerCamelCase : Any=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
_snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = PaddingStrategy(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = padding
else:
_snake_case = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
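# A minimal usage sketch of the padding logic above via a released subclass
# that inherits it (Wav2Vec2FeatureExtractor); the ragged batch is illustrative:
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
    batch = fe.pad(
        {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
        padding="longest",
        return_attention_mask=True,
        return_tensors="np",
    )
    print(batch["input_values"].shape)  # (2, 3): the short example is right-padded
    print(batch["attention_mask"])      # [[1 1 1] [1 0 0]]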
| 103 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
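# A minimal sketch of the behavior these tests pin down, using the released
# checkpoint referenced above:
if __name__ == "__main__":
    from transformers import LongformerTokenizer

    tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    print(tok("Hello world!").input_ids)  # [0, 31414, 232, 328, 2], as asserted above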
| 28 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase : Optional[int] = '\\n\n'
lowerCamelCase : Dict = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCamelCase : Optional[int] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def UpperCAmelCase__ ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : int = 1_6 , _lowerCamelCase : bool = True , _lowerCamelCase : str=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ : Dict = 'cuda'
else:
snake_case__ : Optional[int] = 'cuda' if torch.cuda.is_available() else 'cpu'
snake_case__ : str = AutoModelForCausalLM.from_pretrained(_lowerCamelCase )
snake_case__ : str = model.to(_lowerCamelCase )
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_lowerCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ : Optional[int] = model.config.max_length - 1
else:
snake_case__ : List[str] = model.config.max_length
snake_case__ : Tuple = tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors='pt' , return_attention_mask=_lowerCamelCase , ).to(_lowerCamelCase )
snake_case__ : Tuple = encodings['input_ids']
snake_case__ : List[str] = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ : Any = []
snake_case__ : List[str] = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(_lowerCamelCase ) , _lowerCamelCase ) ):
snake_case__ : List[str] = min(start_index + batch_size , len(_lowerCamelCase ) )
snake_case__ : Optional[int] = encoded_texts[start_index:end_index]
snake_case__ : str = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ : Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_lowerCamelCase )
snake_case__ : Optional[int] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ : Optional[int] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_lowerCamelCase ), attn_mask] , dim=1 )
snake_case__ : Optional[int] = encoded_batch
with torch.no_grad():
snake_case__ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase ).logits
snake_case__ : Tuple = out_logits[..., :-1, :].contiguous()
snake_case__ : Tuple = labels[..., 1:].contiguous()
snake_case__ : str = attn_mask[..., 1:].contiguous()
snake_case__ : str = torch.exp(  # natural-log cross-entropy, so exp gives perplexity
(loss_fct(shift_logits.transpose(1 , 2 ) , _lowerCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_lowerCamelCase )}
| 303 |
lowerCamelCase : int = {str(digit): digit**5 for digit in range(1_0)}
def lowercase__( A ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) )
def lowercase__( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(A ) )
if __name__ == "__main__":
print(solution())
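# A quick worked check of the digit-power identity the search relies on:
# 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0.
if __name__ == "__main__":
    assert sum(int(d) ** 5 for d in "4150") == 4_1_5_0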
| 303 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __lowerCamelCase ( self , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] =[label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(__UpperCAmelCase ) )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : int =[sequences]
SCREAMING_SNAKE_CASE_ : Any =[]
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__UpperCAmelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__A )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase=ZeroShotClassificationArgumentHandler() , *__UpperCAmelCase , **__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] =args_parser
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def __lowerCamelCase ( self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=TruncationStrategy.ONLY_FIRST , **__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : int =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
SCREAMING_SNAKE_CASE_ : int =self.tokenizer.eos_token
try:
SCREAMING_SNAKE_CASE_ : List[str] =self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , )
except Exception as e:
if "too short" in str(__UpperCAmelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
SCREAMING_SNAKE_CASE_ : Any =self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __lowerCamelCase ( self , **__UpperCAmelCase ):
if kwargs.get('multi_class' , __UpperCAmelCase ) is not None:
SCREAMING_SNAKE_CASE_ : Tuple =kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] ={}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE_ : Optional[Any] =self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE_ : Tuple =kwargs['hypothesis_template']
SCREAMING_SNAKE_CASE_ : Any ={}
if "multi_label" in kwargs:
SCREAMING_SNAKE_CASE_ : Tuple =kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase , ):
if len(__UpperCAmelCase ) == 0:
pass
elif len(__UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
SCREAMING_SNAKE_CASE_ : Optional[Any] =args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This example is {}." ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =self._args_parser(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__UpperCAmelCase , __UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ : List[str] =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__UpperCAmelCase ) - 1,
**model_input,
}
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =inputs['candidate_label']
SCREAMING_SNAKE_CASE_ : Optional[int] =inputs['sequence']
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={k: inputs[k] for k in self.tokenizer.model_input_names}
SCREAMING_SNAKE_CASE_ : Dict =self.model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] ={
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ : str =[outputs['candidate_label'] for outputs in model_outputs]
SCREAMING_SNAKE_CASE_ : str =[outputs['sequence'] for outputs in model_outputs]
SCREAMING_SNAKE_CASE_ : List[Any] =np.concatenate([output['logits'].numpy() for output in model_outputs] )
SCREAMING_SNAKE_CASE_ : Dict =logits.shape[0]
SCREAMING_SNAKE_CASE_ : int =len(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =N // n
SCREAMING_SNAKE_CASE_ : Dict =logits.reshape((num_sequences, n, -1) )
if multi_label or len(__UpperCAmelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
SCREAMING_SNAKE_CASE_ : str =self.entailment_id
SCREAMING_SNAKE_CASE_ : Optional[int] =-1 if entailment_id == 0 else 0
SCREAMING_SNAKE_CASE_ : List[Any] =reshaped_outputs[..., [contradiction_id, entailment_id]]
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 , keepdims=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
SCREAMING_SNAKE_CASE_ : Tuple =reshaped_outputs[..., self.entailment_id]
SCREAMING_SNAKE_CASE_ : str =np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 , keepdims=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Any =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
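# A minimal usage sketch through the public `pipeline` factory, the usual
# entry point for this class; the model id is illustrative:
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
        hypothesis_template="This example is {}.",
    )
    print(result["labels"][0], result["scores"][0])  # best label first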
| 220 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = 'Speech2TextFeatureExtractor'
_lowercase = 'Speech2TextTokenizer'
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =self.feature_extractor
SCREAMING_SNAKE_CASE_ : Optional[int] =False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
SCREAMING_SNAKE_CASE_ : Any =kwargs.pop('raw_speech' )
else:
SCREAMING_SNAKE_CASE_ : str =kwargs.pop('audio' , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =kwargs.pop('sampling_rate' , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =kwargs.pop('text' , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE_ : Dict =args[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE_ : str =self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : str =encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =True
SCREAMING_SNAKE_CASE_ : int =self.tokenizer
yield
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.feature_extractor
SCREAMING_SNAKE_CASE_ : Tuple =False
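# A minimal usage sketch with the checkpoint this processor ships with; the
# audio array is a silent placeholder:
if __name__ == "__main__":
    import numpy as np
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    audio = np.zeros(16_000, dtype=np.float32)
    inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
    labels = processor(text="a transcription", return_tensors="pt").input_ids
    print(inputs.input_features.shape, labels.shape)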
| 220 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 23 |
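A quick usage sketch of the pipeline class above via the transformers factory (the model name and the printed caption are illustrative):

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")  # illustrative model
result = captioner("path/to/photo.jpg")  # also accepts URLs and PIL images via load_image in preprocess
print(result)  # e.g. [{"generated_text": "a photo of ..."}], the record built in postprocess above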
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class VideoMAEImageProcessor ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 23 | 1 |
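A short sketch of the video preprocessing above; `make_batched` wraps a single list of frames into a batch of one video (this mirrors transformers' VideoMAEImageProcessor, and the shapes assume the default size/crop of 224):

import numpy as np
from transformers import VideoMAEImageProcessor  # upstream class this snippet mirrors

image_processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]  # 16 frames
inputs = image_processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 16, 3, 224, 224): batch, frames, channels, height, width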
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 695 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions ( op , got_ver , want_ver , requirement , pkg , hint ) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            f''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def require_version ( requirement: str , hint: Optional[str] = None ) -> None:
    hint = f'''\n{hint}''' if hint is not None else ''''''
    # non-versioned check
    if re.match(r'''^[\w_\-\d]+$''' , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                f''' got {requirement}''' )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    f''' but got {requirement}''' )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core ( requirement ) -> None:
    '''require_version wrapper which emits a core-developer-oriented hint on failure'''
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement , hint )
| 695 | 1 |
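`require_version` follows pip's specifier syntax; a few calls that exercise the parsing branches above:

require_version("numpy")                          # bare package: only checks that it is installed
require_version("tokenizers>=0.11.1,!=0.11.3")    # comma-separated multi-bound range
require_version("python>=3.7.0")                  # special-cased against sys.version_info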
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
 | 628 | 1 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(f'''{func}(grid=grid)''' , setup=setup , number=5_00 )
        print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 515 |
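A small worked example of the shrinking-bound idea in `count_negatives_binary_search`: because rows and columns are sorted in decreasing order, the first negative index per row can only move left as we walk down the rows.

small = [[4, 3, -1], [2, 1, -2], [0, -1, -3]]
# Per-row first-negative bounds: 2, 2, 1 -> 5 non-negative cells, so 9 - 5 = 4 negatives.
assert count_negatives_binary_search(small) == count_negatives_brute_force(small) == 4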
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class a__ ( unittest.TestCase ):
    def setUp( self ) -> None:
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator( self , instance_count ) -> Any:
        '''simple docstring'''
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv( self , job_name ) -> None:
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
    def test_model( self , instance_count ) -> None:
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
| 515 | 1 |
def _modexpt(base: int , exponent: int , modulo_value: int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution(base: int = 1_777 , exponent: int = 1_855 , digits: int = 8 ) -> int:
    '''simple docstring'''
    # climb the power tower base**base**...**base, keeping only the last `digits` digits
    result = base
    for _ in range(1 , exponent ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(f'{solution() = }')
| 709 |
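`_modexpt` is plain square-and-multiply modular exponentiation, and `solution` climbs the power tower base↑↑exponent while keeping only the last `digits` digits. Two small exact checks:

assert _modexpt(3, 5, 100) == pow(3, 5, 100) == 43
assert solution(base=3, exponent=3, digits=8) == 97_484_987  # last 8 digits of 3**3**3 = 3**27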
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 357 | 0 |
'''simple docstring'''
def __snake_case ( column_title : str ) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 675 |
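The conversion above is just base-26 with digits A=1..Z=26; worked examples:

assert __snake_case("A") == 1
assert __snake_case("AB") == 1 * 26 + 2 == 28
assert __snake_case("ZZ") == 26 * 26 + 26 == 702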
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        """simple docstring"""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 675 | 1 |
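Usage sketch for the fast tokenizer above (needs network access; the jieba-based pre-tokenization output is illustrative):

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))  # word-level pieces from the jieba pre-tokenizer (illustrative)
ids = tokenizer("今天天气非常好。").input_ids
print(tokenizer.decode(ids))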
import os
def solution() -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 84 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
return out
| 84 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig ( PretrainedConfig ):
    model_type = 'van'
    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
 | 137 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE = 1_0001 ) -> int:
try:
SCREAMING_SNAKE_CASE_ : int = int(SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
SCREAMING_SNAKE_CASE_ : list[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
while len(SCREAMING_SNAKE_CASE ) < nth:
if is_prime(SCREAMING_SNAKE_CASE ):
primes.append(SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 345 | 0 |
'''simple docstring'''
def A_ ( files : list ) -> float:
    '''simple docstring'''
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
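The list-scan above re-finds the minimum in O(n) per merge, giving O(n^2) overall; the same greedy policy is usually written with a min-heap for O(n log n). A sketch (not part of the original file):

import heapq

def optimal_merge_cost_heap(sizes: list[int]) -> int:
    heapq.heapify(sizes)
    total = 0
    while len(sizes) > 1:
        merged = heapq.heappop(sizes) + heapq.heappop(sizes)  # two cheapest files
        total += merged
        heapq.heappush(sizes, merged)
    return total

assert optimal_merge_cost_heap([2, 3, 4]) == 14  # merge 2+3=5, then 5+4=9; 5+9=14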
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps = 1000 , trained_betas = None ):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps , device = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample , *args , **kwargs ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self )-> Optional[int]:
'''simple docstring'''
return self.config.num_train_timesteps
| 451 | 0 |
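A sketch of a denoising loop with the scheduler defined above (the zero `model_output` is a stand-in for a real denoising network; the API mirrors diffusers' IPNDMScheduler):

import torch

scheduler = IPNDMScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)  # start from Gaussian noise
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample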
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Union[str, Any] = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit : int = 100_0000 , n_limit : int = 10 ) -> int:
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"{solution() = }")
| 255 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 548 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_maskformer_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason="""MaskFormer is not a generative model""" )
    def test_generate_without_input_ids( self ):
        pass
    @unittest.skip(reason="""MaskFormer does not use token embeddings""" )
    def test_resize_tokens_embeddings( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_maskformer_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
            """mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
            """class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
def _lowerCamelCase ( self ):
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase__ = 1e-4
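# A minimal, self-contained sketch of the gradient-retention pattern exercised by the
# test above: torch only populates .grad on leaf tensors, so intermediate activations
# must call retain_grad() before backward() for the assertIsNotNone checks to hold.
def _retain_grad_example():
    x = torch.randn(2, 3, requires_grad=True)
    hidden = x * 2        # non-leaf tensor: its .grad is normally discarded
    hidden.retain_grad()  # ask autograd to keep this intermediate's gradient
    hidden.sum().backward()
    assert x.grad is not None and hidden.grad is not None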
def _UpperCamelCase ():
"""simple docstring"""
UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__lowerCAmelCase )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
UpperCamelCase__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCamelCase__ = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCamelCase__ = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
UpperCamelCase__ = inputs["""pixel_values"""].to(__lowerCAmelCase )
UpperCamelCase__ = [el.to(__lowerCAmelCase ) for el in inputs["""mask_labels"""]]
UpperCamelCase__ = [el.to(__lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
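# Hedged sketch (not asserted by the original tests): the mask / class logits verified
# above are typically reduced to a per-pixel segmentation map with the image
# processor's post-processing helper; `post_process_semantic_segmentation` is assumed
# here, as shipped in recent `transformers` releases.
def _semantic_map_example():
    model = MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
    image_processor = MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
    inputs = image_processor(prepare_img() , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    # assumed helper; returns one (height, width) tensor of class ids per image,
    # (480, 640) matching the COCO fixture loaded by prepare_img()
    return image_processor.post_process_semantic_segmentation(outputs , target_sizes=[(480, 640)] )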
| 548 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = b.T
SCREAMING_SNAKE_CASE__ = np.sum(np.square(UpperCamelCase__ ) , axis=1 )
SCREAMING_SNAKE_CASE__ = np.sum(np.square(UpperCamelCase__ ) , axis=0 )
SCREAMING_SNAKE_CASE__ = np.matmul(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE__ = squared_euclidean_distance(UpperCamelCase__ , UpperCamelCase__ )
return np.argmin(UpperCamelCase__ , axis=1 )
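# Self-contained usage sketch for the two helpers above (illustrative shapes only):
# map each normalized RGB pixel to the index of its nearest palette entry, using the
# same ||a||^2 - 2ab + ||b||^2 expansion followed by an argmin over clusters.
def _color_quantize_example():
    rng = np.random.default_rng(0 )
    palette = rng.uniform(-1.0 , 1.0 , size=(512, 3) )  # e.g. ImageGPT's 512 color clusters
    pixels = rng.uniform(-1.0 , 1.0 , size=(8, 8, 3) ).reshape(-1 , 3 )
    d = (
        np.sum(pixels**2 , axis=1 )[:, None]
        - 2 * pixels @ palette.T
        + np.sum(palette**2 , axis=1 )[None, :]
    )
    return np.argmin(d , axis=1 ).reshape(8 , 8 )  # one cluster id per pixel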
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["pixel_values"]
def __init__( self :int , __A :Optional[Union[List[List[int]], np.ndarray]] = None , __A :bool = True , __A :Dict[str, int] = None , __A :PILImageResampling = PILImageResampling.BILINEAR , __A :bool = True , __A :bool = True , **__A :int , ) -> None:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""height""": 256, """width""": 256}
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
SCREAMING_SNAKE_CASE__ = np.array(__A ) if clusters is not None else None
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize
def _snake_case ( self :str , __A :np.ndarray , __A :Dict[str, int] , __A :PILImageResampling = PILImageResampling.BILINEAR , __A :Optional[Union[str, ChannelDimension]] = None , **__A :List[str] , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__A , size=(size["""height"""], size["""width"""]) , resample=__A , data_format=__A , **__A )
def _snake_case ( self :List[Any] , __A :np.ndarray , __A :Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = rescale(image=__A , scale=1 / 1_2_7.5 , data_format=__A )
SCREAMING_SNAKE_CASE__ = image - 1
return image
def _snake_case ( self :Optional[int] , __A :ImageInput , __A :bool = None , __A :Dict[str, int] = None , __A :PILImageResampling = None , __A :bool = None , __A :Optional[bool] = None , __A :Optional[Union[List[List[int]], np.ndarray]] = None , __A :Optional[Union[str, TensorType]] = None , __A :Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__A :List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE__ = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE__ = np.array(__A )
SCREAMING_SNAKE_CASE__ = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(__A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=__A ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE__ = np.array(__A )
SCREAMING_SNAKE_CASE__ = color_quantize(__A , __A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE__ = images.shape[0]
SCREAMING_SNAKE_CASE__ = images.reshape(__A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE__ = list(__A )
else:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__A , __A ) for image in images]
SCREAMING_SNAKE_CASE__ = {"""input_ids""": images}
return BatchFeature(data=__A , tensor_type=__A )
| 6 |
'''simple docstring'''
A__ : dict[tuple[int, int, int], int] = {}
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__lowerCamelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 )
__lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime
__lowerCamelCase : Union[str, Any] = prizestrings
return prizestrings
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int:
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
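# Equivalent formulation (a sketch, not used above): the hand-rolled cache dict can be
# replaced by functools.lru_cache, which memoizes on the same (days, absent, late) key
# and yields the identical count of valid prize strings.
from functools import lru_cache

@lru_cache(maxsize=None )
def _calculate_cached(days: int , absent: int , late: int ) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _calculate_cached(days - 1 , absent , late + 1 )  # late today
        + _calculate_cached(days - 1 , absent + 1 , 0 )  # absent today
        + _calculate_cached(days - 1 , absent , 0 )  # on time today
    )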
if __name__ == "__main__":
print(solution())
| 13 | 0 |
"""simple docstring"""
from manim import *
class _a ( lowerCAmelCase):
"""simple docstring"""
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
_UpperCAmelCase = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
_UpperCAmelCase = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
_UpperCAmelCase = Text('''CPU''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
_UpperCAmelCase = [mem.copy() for i in range(1 )]
_UpperCAmelCase = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
_UpperCAmelCase = Text('''GPU''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.align_to(__UpperCamelCase , __UpperCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__UpperCamelCase )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
_UpperCAmelCase = Text('''Model''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) , )
_UpperCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase , run_time=2.5 ) , Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.add(__UpperCamelCase )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(__UpperCamelCase ):
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
cpu_target.move_to(__UpperCamelCase )
cpu_target.generate_target()
_UpperCAmelCase = 0.4_6 / 4
_UpperCAmelCase = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__UpperCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__UpperCamelCase , buff=0.0 )
cpu_targs.append(__UpperCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__UpperCamelCase ) )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 95 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Dict = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """informer"""
UpperCamelCase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : List[str] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "student_t" , __UpperCamelCase : str = "nll" , __UpperCamelCase : int = 1 , __UpperCamelCase : List[int] = None , __UpperCamelCase : Optional[Union[str, bool]] = "mean" , __UpperCamelCase : int = 0 , __UpperCamelCase : int = 0 , __UpperCamelCase : int = 0 , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : int = 6_4 , __UpperCamelCase : int = 3_2 , __UpperCamelCase : int = 3_2 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 2 , __UpperCamelCase : bool = True , __UpperCamelCase : str = "gelu" , __UpperCamelCase : float = 0.0_5 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : int = 1_0_0 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str = "prob" , __UpperCamelCase : int = 5 , __UpperCamelCase : bool = True , **__UpperCamelCase : Union[str, Any] , )->Dict:
# time series specific configuration
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length or prediction_length
_UpperCAmelCase = distribution_output
_UpperCAmelCase = loss
_UpperCAmelCase = input_size
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_UpperCAmelCase = scaling
_UpperCAmelCase = num_dynamic_real_features
_UpperCAmelCase = num_static_real_features
_UpperCAmelCase = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_UpperCAmelCase = cardinality
else:
_UpperCAmelCase = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_UpperCAmelCase = embedding_dimension
else:
_UpperCAmelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = use_cache
# Informer
_UpperCAmelCase = attention_type
_UpperCAmelCase = sampling_factor
_UpperCAmelCase = distil
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowercase__ ( self : Optional[int] )->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
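# Hedged sketch of the derived model-input width computed in __init__ above: per time
# step the transformer consumes one value per lag plus the extra features counted by
# `_number_of_features`. Defaults below are illustrative only (one target channel,
# two time features, no static or categorical features):
def _informer_feature_size(input_size=1 , num_lags=7 , num_time_features=2 ,
                           num_dynamic_real=0 , num_static_real=0 , embedding_dims=() ):
    # mirrors `_number_of_features`: embeddings + dynamic/time/static reals + loc/scale
    number_of_features = (sum(embedding_dims ) + num_dynamic_real + num_time_features
                          + num_static_real + input_size * 2 )
    return input_size * num_lags + number_of_features

assert _informer_feature_size() == 11  # 1 * 7 lags + (2 time features + 2 loc/scale)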
| 95 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list[int] ): # This function is recursive
__UpperCAmelCase = len(snake_case_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__UpperCAmelCase = array[0]
__UpperCAmelCase = False
__UpperCAmelCase = 1
__UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
__UpperCAmelCase = True
__UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
__UpperCAmelCase = longest_subsequence(snake_case_ )
if len(snake_case_ ) > len(snake_case_ ):
__UpperCAmelCase = temp_array
else:
i += 1
__UpperCAmelCase = [element for element in array[1:] if element >= pivot]
__UpperCAmelCase = [pivot, *longest_subsequence(snake_case_ )]
if len(snake_case_ ) > len(snake_case_ ):
return temp_array
else:
return longest_subseq
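# For contrast, a standard O(n^2) dynamic-programming version of the same problem
# (longest non-decreasing subsequence, matching the >= comparisons used above):
def longest_subsequence_dp(array: list[int] ) -> list[int]:
    if not array:
        return array
    best = [[value] for value in array]  # best[i]: longest valid subsequence ending at array[i]
    for i in range(1 , len(array ) ):
        for j in range(i ):
            if array[i] >= array[j] and len(best[j] ) + 1 > len(best[i] ):
                best[i] = best[j] + [array[i]]
    return max(best , key=len )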
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def __UpperCAmelCase ( __lowerCamelCase : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError()
| 103 | 0 |
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase = int(SCREAMING_SNAKE_CASE_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase = divmod(SCREAMING_SNAKE_CASE_ , 2 )
return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase = str(SCREAMING_SNAKE_CASE_ ).strip()
if not number:
raise ValueError("""No input value was provided""" )
UpperCamelCase = """-""" if number.startswith("""-""" ) else """"""
UpperCamelCase = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f'{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}'
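# Standalone sanity sketch of the same divide-by-two recursion, checked against
# Python's built-in bin():
def _to_binary(decimal: int ) -> str:
    if decimal in (0, 1):
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return _to_binary(div ) + str(mod )

assert "0b" + _to_binary(200 ) == bin(200 )  # both give '0b11001000'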
if __name__ == "__main__":
from doctest import testmod
testmod()
| 181 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase :
def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : str=1_3 , __magic_name__ : str=7 , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Tuple=9_9 , __magic_name__ : List[str]=3_2 , __magic_name__ : List[str]=5 , __magic_name__ : int=4 , __magic_name__ : Union[str, Any]=3_7 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[Any]=5_0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = use_labels
UpperCamelCase = scope
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = self.prepare_config_and_inputs()
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase_ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : List[str] , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ )
UpperCamelCase = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = BertGenerationEncoder(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Dict , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = BertGenerationDecoder(config=__magic_name__ ).to(__magic_name__ ).eval()
# first forward pass
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
UpperCamelCase = model(
__magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 ) )
def lowerCamelCase_ ( self : int , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , *__magic_name__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = BertGenerationDecoder(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoderTester(self )
UpperCamelCase = ConfigTester(self , config_class=__magic_name__ , hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
UpperCamelCase = """bert"""
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__magic_name__ )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase = None
self.model_tester.create_and_check_model_as_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__magic_name__ )
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(__magic_name__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCamelCase = model(__magic_name__ )[0]
UpperCamelCase = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , __magic_name__ )
UpperCamelCase = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCamelCase = model(__magic_name__ )[0]
UpperCamelCase = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , __magic_name__ )
UpperCamelCase = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
| 181 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE ) as metadata_file:
_UpperCamelCase =json.load(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =LukeConfig(use_entity_aware_attention=__SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCamelCase =torch.load(__SCREAMING_SNAKE_CASE , map_location='''cpu''' )
# Load the entity vocab file
_UpperCamelCase =load_entity_vocab(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase =AddedToken('''<ent>''' , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
_UpperCamelCase =AddedToken('''<ent2>''' , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCamelCase =LukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_UpperCamelCase =state_dict['''embeddings.word_embeddings.weight''']
_UpperCamelCase =word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCamelCase =word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCamelCase =torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase =f'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase =state_dict[prefix + matrix_name]
_UpperCamelCase =state_dict[prefix + matrix_name]
_UpperCamelCase =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase =state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCamelCase =entity_emb[entity_vocab['''[MASK]''']]
_UpperCamelCase =LukeModel(config=__SCREAMING_SNAKE_CASE ).eval()
_UpperCamelCase , _UpperCamelCase =model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
if not (len(__SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {', '.join(__SCREAMING_SNAKE_CASE )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
_UpperCamelCase =LukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , task='''entity_classification''' )
_UpperCamelCase =(
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCamelCase =(39, 42)
_UpperCamelCase =tokenizer(__SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
_UpperCamelCase =model(**__SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
_UpperCamelCase =torch.Size((1, 42, 1024) )
_UpperCamelCase =torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_UpperCamelCase =torch.Size((1, 42, 768) )
_UpperCamelCase =torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCamelCase =torch.Size((1, 1, 1024) )
_UpperCamelCase =torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_UpperCamelCase =torch.Size((1, 1, 768) )
_UpperCamelCase =torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__SCREAMING_SNAKE_CASE ) )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase ={}
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
_UpperCamelCase , _UpperCamelCase =line.rstrip().split('''\t''' )
_UpperCamelCase =index
return entity_vocab
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__lowerCamelCase : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
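# Usage note: a direct call with illustrative local paths (the three input files come
# from the original studio-ousia LUKE release that this script converts):
#
# convert_luke_checkpoint(
#     "luke_large_500k/pytorch_model.bin",  # checkpoint_path
#     "luke_large_500k/metadata.json",      # metadata_path
#     "luke_large_500k/entity_vocab.tsv",   # entity_vocab_path
#     "./converted_luke",                   # pytorch_dump_folder_path
#     "large",                              # model_size
# )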
| 404 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( lowercase_ , unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = GPTSanJapaneseTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = {"""do_clean_text""": False, """add_prefix_space""": False}
def UpperCamelCase__ ( self : Tuple ) -> Optional[Any]:
super().setUp()
# fmt: off
_UpperCamelCase =['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
_UpperCamelCase ={'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
_UpperCamelCase ={'''unk_token''': '''<unk>'''}
_UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCamelCase__ ) )
def UpperCamelCase__ ( self : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase__ ( self : str , UpperCamelCase__ : List[str] ) -> int:
_UpperCamelCase ='''こんにちは、世界。 \nこんばんは、㔺界。😀'''
_UpperCamelCase ='''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
_UpperCamelCase , _UpperCamelCase =self.get_input_output_texts(UpperCamelCase__ )
_UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
_UpperCamelCase =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return text, ids
def UpperCamelCase__ ( self : Tuple ) -> Dict:
pass # TODO add if relevant
def UpperCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
pass # TODO add if relevant
def UpperCamelCase__ ( self : Tuple ) -> int:
pass # TODO add if relevant
def UpperCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase =self.get_tokenizer()
# Testing tokenization
_UpperCamelCase ='''こんにちは、世界。 こんばんは、㔺界。'''
_UpperCamelCase =['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
_UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids without special tokens
_UpperCamelCase =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing conversion to ids with special tokens
_UpperCamelCase =tokens + [tokenizer.unk_token]
_UpperCamelCase =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase =self.get_tokenizer()
# Testing tokenization
_UpperCamelCase ='''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
_UpperCamelCase ='''こんにちは、、、、世界。こんばんは、、、、世界。'''
_UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
_UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self : str ) -> str:
_UpperCamelCase =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_UpperCamelCase ='''こんにちは、世界。'''
_UpperCamelCase ='''こんばんは、㔺界。😀'''
_UpperCamelCase ='''こんにちは、世界。こんばんは、世界。😀'''
_UpperCamelCase =tokenizer.encode(prefix_text + input_text )
_UpperCamelCase =tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
_UpperCamelCase =tokenizer.encode(UpperCamelCase__ , prefix_text=UpperCamelCase__ )
_UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
_UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
_UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self : Optional[int] ) -> Any:
_UpperCamelCase =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
_UpperCamelCase ='''こんにちは、世界。'''
_UpperCamelCase ='''こんばんは、㔺界。😀'''
_UpperCamelCase =len(tokenizer.encode(UpperCamelCase__ ) ) - 2
_UpperCamelCase =len(tokenizer.encode(UpperCamelCase__ ) ) - 2
_UpperCamelCase =[1] + [0] * (len_prefix + len_text + 1)
_UpperCamelCase =[1] * (len_prefix + len_text + 1) + [0]
_UpperCamelCase =[1] + [1] * (len_prefix) + [0] * (len_text + 1)
_UpperCamelCase =tokenizer(prefix_text + input_text ).token_type_ids
_UpperCamelCase =tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
_UpperCamelCase =tokenizer(UpperCamelCase__ , prefix_text=UpperCamelCase__ ).token_type_ids
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_UpperCamelCase =tokenizer.encode('''あンいワ''' )
_UpperCamelCase =tokenizer.encode('''''' , prefix_text='''あンいワ''' )
_UpperCamelCase =tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , tokenizer.decode(UpperCamelCase__ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , tokenizer.decode(UpperCamelCase__ ) )
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase__ ( self : Tuple ) -> Dict:
_UpperCamelCase =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
_UpperCamelCase =[['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
_UpperCamelCase =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
_UpperCamelCase =tokenizer.batch_encode_plus(UpperCamelCase__ , padding=UpperCamelCase__ )
# fmt: off
_UpperCamelCase =[[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
_UpperCamelCase =[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_UpperCamelCase =[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase__ )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase__ )
self.assertListEqual(x_token.attention_mask , UpperCamelCase__ )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase__ )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase__ )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[int] ) -> int:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def UpperCamelCase__ ( self : Dict ) -> Optional[int]:
# tokenizer has no padding token
pass
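# Shape sketch for the prefix-LM masks built in the tests above: for a prefix of p
# tokens and an input of t tokens the tokenizer emits p + t + 2 positions, and
# token_type_ids mark the prefix span with 1s.
def _prefix_token_types(len_prefix: int , len_text: int ) -> list:
    # mirrors the expected mask in the test: 1 for the prefix part, 0 afterwards
    return [1] + [1] * len_prefix + [0] * (len_text + 1)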
| 404 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int, SCREAMING_SNAKE_CASE__: Optional[int]=False, SCREAMING_SNAKE_CASE__: Any=False, SCREAMING_SNAKE_CASE__: Any=False ) -> Dict:
"""simple docstring"""
__a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: Any ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__a = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
__a = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__a = in_proj_weight[
: config.hidden_size, :
]
__a = in_proj_bias[: config.hidden_size]
__a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a = in_proj_weight[
-config.hidden_size :, :
]
__a = in_proj_bias[-config.hidden_size :]
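# The slicing above relies on timm storing attention as a single fused projection of
# shape (3 * hidden_size, hidden_size) laid out as [query; key; value]. Standalone
# illustration of the split:
def _split_qkv(in_proj_weight , hidden_size ):
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v  # three (hidden_size, hidden_size) matrices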
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[str] ) -> List[Any]:
"""simple docstring"""
__a = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Tuple, SCREAMING_SNAKE_CASE__: List[str], SCREAMING_SNAKE_CASE__: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__a = dct.pop(UpperCAmelCase__ )
__a = val
@torch.no_grad()
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: Optional[int] ) -> List[str]:
"""simple docstring"""
__a = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=UpperCAmelCase__ )
__a = False
__a = False
__a = False
__a = False
if "vqa" in checkpoint_url:
__a = True
__a = 3129
__a = """huggingface/label-files"""
__a = """vqa2-id2label.json"""
__a = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type='dataset' ), 'r' ) )
__a = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = ViltForQuestionAnswering(UpperCAmelCase__ )
elif "nlvr" in checkpoint_url:
__a = True
__a = 2
__a = {0: """False""", 1: """True"""}
__a = {v: k for k, v in config.idalabel.items()}
__a = 3
__a = ViltForImagesAndTextClassification(UpperCAmelCase__ )
elif "irtr" in checkpoint_url:
__a = True
__a = ViltForImageAndTextRetrieval(UpperCAmelCase__ )
elif "mlm_itm" in checkpoint_url:
__a = True
__a = ViltForMaskedLM(UpperCAmelCase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
__a = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location='cpu' )["""state_dict"""]
__a = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ )
if mlm_model or irtr_model:
__a = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__, UpperCAmelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
__a = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCAmelCase__ )
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        # the logits are 2-D here, so only the (1, num_labels) slice is checked
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 708 |
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if `number` is even, checking the lowest bit.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 270 | 0 |
from torch import nn

class ClassificationHead(nn.Module):
    """Classification head mapping hidden states of size `embed_size` to `class_size` logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
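
# Minimal usage sketch (sizes are illustrative assumptions; requires `import torch`):
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)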
| 165 |
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below `n`, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
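
# Quick sanity check on a small bound: the primes below 10 are 2, 3, 5 and 7,
# so solution(10) returns 17.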
if __name__ == "__main__":
print(f'''{solution() = }''')
| 165 | 1 |
from __future__ import annotations

def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search over an adjacency-list graph.

    :param graph: directed graph mapping each vertex to its neighbours
    :param start: starting vertex
    :returns: the set of explored vertices
    """
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
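
# A vertex is pushed at most once per incoming edge and popped at most once each
# time, so the traversal runs in O(V + E) on an adjacency-list graph like `G` below.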

G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 705 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 298 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
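
# At import time the real module is swapped for a _LazyModule proxy, so heavy
# submodules (e.g. modeling_encodec) are only imported on first attribute access.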
| 97 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}

class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, backed by SentencePiece."""

    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, then re-split mixed CJK/digit pieces."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """Converts a sequence of ids into a single string."""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
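
# Minimal usage sketch (file paths are illustrative assumptions):
# tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
# tokenizer("Hello world")["input_ids"]  # ids wrapped with [CLS] ... [SEP]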
| 97 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Always raise the CUDA OOM error that `find_executable_batch_size` catches."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))

class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
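
# Typical use of the decorator outside these tests (sizes are illustrative):
#
# @find_executable_batch_size(starting_batch_size=64)
# def training_loop(batch_size):
#     ...  # on CUDA OOM the wrapper halves batch_size and retries until it fits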
| 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
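
# Illustrative instantiation (defaults mirror the RoBERTa-base layout):
# config = IBertConfig(quant_mode=True)  # enable I-BERT's integer-only mode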
| 5 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)

def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image

@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 75 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}

def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
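
# Example of what the regex extracts (hypothetical docstring snippet):
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]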

def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 75 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict

@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass

@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 711 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation

class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
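
        # Note: in float32, exp(100) overflows to inf so sigmoid(-100) is exactly 0,
        # which is why act(-100) compares equal to 0 in the assertions above.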
| 348 | 0 |
'''simple docstring'''
from manim import *

class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(cpu, gpu, *model_cpu_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 692 |
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
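
# Illustrative instantiation (values match the default bert-generation layout):
# config = BertGenerationConfig(vocab_size=50358, hidden_size=1024, use_cache=True)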
| 157 | 0 |
"""simple docstring"""


class MaxFenwickTree:
    """Fenwick-style tree supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value and refresh every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # the node covers [current_left_border, index], so it must hold
                # the maximum over that whole range, not just the new value
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
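# A clean, runnable restatement of the structure above, since its identifiers
# were mangled: a Fenwick-style tree for range-max queries with point updates.
# The query walk mirrors the original; the update here simply recomputes each
# covering node from the raw array (always correct, if not asymptotically
# optimal), and values are assumed non-negative because `query` starts at 0.
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # tree[i] = max of arr[get_prev(i) + 1 .. i]

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            left = self.get_prev(index) + 1
            # Recompute the node's max over the slice it covers.
            self.tree[index] = max(self.arr[left : index + 1])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Max of arr[left:right] (right is exclusive)."""
        right -= 1
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    t = MaxFenwickTree(8)
    t.update(2, 5)
    t.update(6, 9)
    assert t.query(0, 8) == 9
    assert t.query(0, 5) == 5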
| 701 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
    def test_batch_sampler_shards_with_splits(self):
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
    def test_batch_sampler_shards_with_splits_no_even(self):
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
    def test_batch_sampler_with_varying_batch_size(self):
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3) | 621 | 0 |
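# Standalone sketch of the sharding behaviour exercised above: split one
# BatchSampler across two processes and inspect what each rank receives.
# Only the positional call pattern used in the tests is assumed here.
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, rank) for rank in range(2)]
for rank, shard in enumerate(shards):
    print(rank, list(shard))
# 0 [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
# 1 [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]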
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
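# Example invocation of the script above (the script filename and all paths
# are placeholders; the defaults match the argparse setup):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json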
| 388 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
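# Usage sketch for the helpers above: spread 12 attention blocks over 2
# devices and validate the resulting map.
if __name__ == "__main__":
    device_map = get_device_map(12, [0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, 12)  # raises ValueError on missing/duplicate blocks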
| 388 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
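# Minimal standalone sketch of a TFLayoutLM forward pass outside the test
# harness, following the documented LayoutLM usage pattern: each token needs
# a normalized (0-1000) bounding box, plus boxes for the [CLS]/[SEP] tokens.
import tensorflow as tf
from transformers import LayoutLMTokenizer, TFLayoutLMModel

tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

words = ["Hello", "world"]
normalized_word_boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]

token_boxes = []
for word, box in zip(words, normalized_word_boxes):
    token_boxes.extend([box] * len(tokenizer.tokenize(word)))
# add bounding boxes for the [CLS] and [SEP] tokens
token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]

encoding = tokenizer(" ".join(words), return_tensors="tf")
outputs = model(
    input_ids=encoding["input_ids"],
    bbox=tf.convert_to_tensor([token_boxes]),
    attention_mask=encoding["attention_mask"],
    token_type_ids=encoding["token_type_ids"],
)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)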
| 165 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
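# Usage sketch for the pipeline above via the factory function; the checkpoint
# is the stock sentiment-analysis default, any sequence-classification model
# on the Hub works the same way.
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("This library is great!"))               # [{'label': 'POSITIVE', 'score': ...}]
print(classifier("This library is great!", top_k=None))   # scores for every label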
| 583 |
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'{decimal_to_fraction(2) = }')
print(F'{decimal_to_fraction(8_9.0) = }')
print(F'{decimal_to_fraction("67") = }')
print(F'{decimal_to_fraction("45.0") = }')
print(F'{decimal_to_fraction(1.5) = }')
print(F'{decimal_to_fraction("6.25") = }')
print(F'{decimal_to_fraction("78td") = }')
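# Cross-check against the standard library (illustrative only; note the line
# above raises ValueError, so this would run before it in practice):
# fractions.Fraction reduces "6.25" to 25/4, matching the function above.
from fractions import Fraction
assert decimal_to_fraction("6.25") == (Fraction("6.25").numerator, Fraction("6.25").denominator) == (25, 4)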
| 583 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
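# Hypothetical concrete subclass showing how the mixin above is wired up in
# the diffusers test suite: point `block_class`/`block_type` at a real block
# and inherit the checks. The expected slice passed to `test_output` would be
# a recorded reference value, which is why none is hardcoded here.
from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"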
| 715 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
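# Worked example for the closed form above: the attractive Casimir force
# between two 4 cm^2 plates separated by one micrometre,
# F = (hbar * c * pi^2 * A) / (240 * d^4) ~= 5.2e-7 N.
print(casimir_force(force=0, area=4e-4, distance=1e-6))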
| 425 | 0 |
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    end = len(s) // 2
    n = len(s)

    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 122 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
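# Non-interactive sanity check (hypothetical workload): three processes
# arriving at t = 0, 1, 2 with bursts 6, 8, 3. Under shortest-remaining-time-
# first the short job preempts at t = 2, giving waiting times [3, 8, 0].
assert calculate_waitingtime([0, 1, 2], [6, 8, 3], 3) == [3, 8, 0]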
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs) | 162 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
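# Quick sketch of the derived fields computed in SwinConfig.__init__ above.
if __name__ == "__main__":
    config = SwinConfig()
    print(config.hidden_size)  # 768 == 96 * 2 ** (len(depths) - 1)
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']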
| 714 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # A root node is an articulation point if it has more than one DFS child
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
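# For the adjacency list above the program prints 2, 3 and 5: removing 2
# separates the {0, 1} triangle from the rest, removing 3 isolates 4, and
# removing 5 cuts the 5-6-7-8 cycle off from vertex 2.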
| 74 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # disable settings that the scripted beam-search generator does not support
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
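# Editor's note -- example invocation (a sketch; the script file name is an assumption):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart_beam_search.onnx
# The exported graph bakes beam search into the ONNX model, so ONNX Runtime takes
# num_beams/max_length as runtime inputs, which the validation call above exercises.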
| 73 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
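# Editor's note -- this script is meant to be launched with multiple processes,
# e.g. (sketch): accelerate launch --num_processes 2 test_metrics.py
# gather_for_metrics() drops the duplicate samples that distributed samplers pad
# into the final batch, which is exactly what the baseline-vs-DDP assertions verify.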
| 73 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates train and validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
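# Editor's note -- typical launch (a sketch; the config file name is an assumption):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --performance_lower_bound 0.80
# DummyOptim/DummyScheduler above are placeholders used when the DeepSpeed config
# supplies its own optimizer and scheduler; accelerator.prepare() then swaps in the
# real DeepSpeed-managed objects.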
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 665 |
"""Convert mLUKE checkpoint."""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            entity_name = f"{language}:{entity_name}"
            new_mapping[entity_name] = entity_id
    return new_mapping
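# Editor's note -- example invocation (a sketch; all paths are placeholders):
#   python convert_mluke_checkpoint.py --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base --model_size base
# Each entity-vocab line is a JSON object like {"id": 3, "entities": [["Japan", "en"], ...]},
# which load_original_entity_vocab() flattens into {"en:Japan": 3, ...}.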
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 665 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
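# Editor's note -- minimal usage sketch (class names as they appear in transformers):
#   from transformers import DeformableDetrConfig, DeformableDetrModel
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   model = DeformableDetrModel(config)  # randomly initialised weights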
| 357 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled, swap in a plain Bert one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # the custom pre-tokenizer cannot be serialized to tokenizer.json, use Bert's while saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
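# Editor's note -- usage sketch (the checkpoint id appears in the map above):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # jieba word segmentation, then WordPiece
# The __getstate__/__setstate__ overrides exist because the custom Jieba
# pre-tokenizer lives outside the Rust `tokenizers` object and cannot be pickled.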
| 357 | 1 |
def sum_of_digits(n: int) -> int:
    """Find the sum of the decimal digits of n, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of the decimal digits of n, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of the decimal digits of n via a str() round-trip."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with three integers of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
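# Editor's note -- quick sanity check (illustrative):
#   sum_of_digits(262144) == 2 + 6 + 2 + 1 + 4 + 4 == 19
# All three implementations return the same value for any integer, including
# negatives, since each takes abs(n) first.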
| 166 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires a forward method
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
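# Editor's note -- example invocation (a sketch; the script name and ckpt path are placeholders):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa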
| 166 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 544 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
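# Editor's note -- usage sketch: X-MOD routes each forward pass through a
# per-language adapter, so a language must be selected, e.g.
#   config = XmodConfig(default_language="en_XX")
# (models built from this config can also switch later via set_default_language).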
| 238 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Cleans a table-of-content section by removing duplicate entries and sorting them alphabetically by title.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
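    # --- Example (hypothetical data, added for illustration) ---
    # A minimal sketch of what `clean_doc_toc` does: duplicates are merged and
    # entries are sorted by title, with any "Overview" page pinned first:
    #
    #     docs = [
    #         {"local": "api/ddim", "title": "DDIM"},
    #         {"local": "api/overview", "title": "Overview"},
    #         {"local": "api/ddim", "title": "DDIM"},
    #     ]
    #     clean_doc_toc(docs)
    #     # -> [{'local': 'api/overview', 'title': 'Overview'},
    #     #     {'local': 'api/ddim', 'title': 'DDIM'}]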
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE over characters, decode-only without merges)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_SPLITTER)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_SPLITTER))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
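# --- Usage sketch (paths are hypothetical; the checkpoint name appears in the map above) ---
# Without `merges.txt` the tokenizer is decode-only, which is how it is typically used
# behind a speech model:
#
#     tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#     text = tokenizer.convert_tokens_to_string(["wie@@ ", "geht", "es</w>"])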
| 16 | 0 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once the user enters "n"
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # prints one tree level per line
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
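    # --- Non-interactive example (added for illustration; build_tree above is interactive) ---
    # Building the small tree 1 -> (2, 3) by hand and printing its pre-order:
    #
    #     root = TreeNode(1)
    #     root.left, root.right = TreeNode(2), TreeNode(3)
    #     pre_order(root)   # prints: 1,2,3,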
| 469 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
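    # The `_LazyModule` registered above defers the heavy torch/TF imports until an
    # attribute is first accessed, e.g. (sketch, assuming transformers is installed):
    #
    #     from transformers.models.data2vec import Data2VecTextConfig  # triggers the lazy import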
| 411 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Stochastic sampling from Karras et al. [1] tailored to Variance-Expanding (VE) models [2].

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
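# --- Usage sketch (the checkpoint name below is hypothetical) ---
#     pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-checkpoint")  # hypothetical repo id
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]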
| 709 | '''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a plain vocab.json."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
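# --- Usage sketch (the vocab path is hypothetical) ---
# MGP-STR tokenizes at the character level, so every character must be a vocab key:
#
#     tok = MgpstrTokenizer(vocab_file="vocab.json")  # hypothetical local file
#     tok._tokenize("cat")  # -> ['c', 'a', 't']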
| 496 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
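    # A few more inputs and what the regex above accepts (checked against the pattern):
    #     is_sri_lankan_phone_number("+94773283048")  # True  (+94 prefix, 77 operator code)
    #     is_sri_lankan_phone_number("0770415216")    # True  (leading 0)
    #     is_sri_lankan_phone_number("073-2345678")   # False (3 is not a valid operator digit)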
| 65 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
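# The slow integration tests above are opt-in, following the transformers test-suite
# convention (the path below is indicative of the usual layout):
#
#     RUN_SLOW=1 python -m pytest tests/models/segformer/test_modeling_segformer.py -k "ModelIntegrationTest"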
| 639 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Constructs a "fast" ELECTRA tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
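# --- Usage sketch (checkpoint name taken from the map above) ---
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     enc = tok("hello world")
#     # enc.input_ids starts with the [CLS] id and ends with the [SEP] id,
#     # matching build_inputs_with_special_tokens above.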
| 714 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:16, :16] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
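# Kandinsky 2.2 is a two-stage system: the prior turns the prompt into image
# embeddings and the decoder inpaints with them. A condensed sketch of the flow
# exercised by the integration test above:
#
#     image_emb, zero_emb = pipe_prior(prompt).to_tuple()
#     result = pipeline(image=init_image, mask_image=mask,
#                       image_embeds=image_emb, negative_image_embeds=zero_emb)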
| 357 | 0 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Get image rotation: warp the image with the affine transform mapping pt1 to pt2.
    """
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
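    # cv2.getAffineTransform solves for the 2x3 matrix M mapping three source points
    # to three destination points; quick sanity check (standalone, same imports):
    #
    #     src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
    #     dst = np.array([[0, 0], [0, 1], [-1, 0]], np.float32)
    #     print(cv2.getAffineTransform(src, dst))  # 90-degree rotation matrix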
| 23 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
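# The -100 label masking above follows the standard PyTorch convention:
# CrossEntropyLoss(ignore_index=-100) skips padded positions, e.g.
#
#     labels = [5, 0, 0]                        # 0 == pad_token_id for BERT
#     [-100 if t == 0 else t for t in labels]   # -> [5, -100, -100]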
| 23 | 1 |
'''simple docstring'''
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) specialised for range-maximum queries:
    O(log^2 n) point updates and O(log^2 n) max queries over [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the maximum of the block [current_left_border, index] this node covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
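    # --- Usage example for MaxFenwickTree (values chosen for illustration) ---
    #     tree = MaxFenwickTree(5)
    #     tree.update(0, 10)
    #     tree.update(1, 20)
    #     tree.update(2, 5)
    #     assert tree.query(0, 3) == 20   # max over indices [0, 3)
    #     assert tree.query(2, 3) == 5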
| 593 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for this class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
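
# A minimal usage sketch (the checkpoint name is only an example):
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")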
| 593 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
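
# A minimal usage sketch, following the usual transformers config workflow
# (the model class name is assumed from the library):
#
#     from transformers import MraConfig, MraModel
#
#     config = MraConfig()      # mra-base style defaults
#     model = MraModel(config)  # randomly initialised weights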
| 519 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage is a strided convolution: out = floor((in + 2 * pad - kernel) / stride) + 1
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Cvt does not output attentions" )
def __a ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __a ( self ) -> str:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __a ( self ) -> Union[str, Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self ) -> Optional[Any]:
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 122 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion; `vqvae` is optional (latent audio diffusion)."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM needs far fewer inference steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the DDIM sampling to recover the noise that generates `images`."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors, weighted by `alpha`."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
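
# A minimal usage sketch: slerp interpolates between two noise tensors along a
# great circle, which roughly preserves their norm (alpha is assumed in [0, 1]):
#
#     x0 = torch.randn(1, 1, 256, 256)
#     x1 = torch.randn(1, 1, 256, 256)
#     halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)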
| 34 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
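
# A minimal usage sketch of the prompt helpers above (the prompt text is illustrative):
#
#     use_cpu = _ask_field(
#         "Do you want to run your training on CPU only? [yes/NO]: ",
#         _convert_yes_no_to_bool,
#         default=False,
#         error_message="Please enter yes or no.",
#     )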
| 34 | 1 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of a given number n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor, strip it out completely, and remember it
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
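
# Worked example: 13195 = 5 * 7 * 13 * 29, so solution(13195) returns 29.
# The default argument 600851475143 is the input of Project Euler problem 3.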
if __name__ == "__main__":
print(f"{solution() = }")
| 78 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition: only positions that are already constructible can be extended
        if table[i] != []:
            for word in word_bank:
                # slice condition: the word must match the target at this position
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # add the word to every combination the current position holds
                    # and push each result to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
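
# A small worked trace: for target "ab" and word_bank ["a", "b", "ab"],
# table[0] = [[]]; the match "a" at index 0 yields table[1] = [["a"]] and the
# match "ab" yields table[2] = [["ab"]]; the match "b" at index 1 then appends
# ["b", "a"], which the final reversal turns into ["a", "b"], so the call
# returns [["ab"], ["a", "b"]].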
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 228 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ = OmegaConf.load(__a )
if display:
print(yaml.dump(OmegaConf.to_container(__a ) ) )
return config
def _lowerCamelCase ( __a, __a=None, __a=None ):
if conf_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.yaml'''
SCREAMING_SNAKE_CASE_ = load_config(__a, display=__a )
SCREAMING_SNAKE_CASE_ = VQModel(**config.model.params )
if ckpt_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.pt'''
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location=__a )
if ".ckpt" in ckpt_path:
SCREAMING_SNAKE_CASE_ = sd['''state_dict''']
model.load_state_dict(__a, strict=__a )
model.to(__a )
del sd
return model
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = model.encode(__a )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
SCREAMING_SNAKE_CASE_ = model.decode(__a )
return xrec
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = string.rsplit('''.''', 1 )
if reload:
SCREAMING_SNAKE_CASE_ = importlib.import_module(__a )
importlib.reload(__a )
return getattr(importlib.import_module(__a, package=__a ), cls )
def _lowerCamelCase ( __a ):
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''', {} ) )
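
# A minimal usage sketch (the config layout mirrors taming-transformers YAML
# files; the target path is illustrative):
#
#     cfg = {"target": "taming.models.vqgan.VQModel", "params": {...}}
#     model = instantiate_from_config(cfg)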
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 719 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
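
# Example invocation (a sketch; the input paths are placeholders, not files
# shipped with this script):
#
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird_model.ckpt \
#         --big_bird_config_file config.json \
#         --pytorch_dump_path ./pt_model \
#         --is_trivia_qa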
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
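
# Shape of the returned dict (a sketch derived from the logic above) for a
# short answer:
#
#     {"id": "...", "category": ["short"], "start_token": [...], "end_token": [...],
#      "start_byte": [...], "end_byte": [...], "text": [...], "remove_it": False}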
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context."""
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the no-answer ("null") windows
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_UpperCamelCase = load_dataset('''natural_questions''')
_UpperCamelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_UpperCamelCase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_UpperCamelCase = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_UpperCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_UpperCamelCase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_UpperCamelCase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 146 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (FocalNetBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = FocalNetConfig
__SCREAMING_SNAKE_CASE = False
    def setUp(self ) -> None:
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self )
| 146 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , num_labels=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase : str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase : List[Any] = False
lowercase : Optional[int] = False
lowercase : Tuple = False
lowercase : Union[str, Any] = False
lowercase : Optional[Any] = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> List[Any]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 165 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
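    # Example invocation (hypothetical file locations; the actual script name and
    # checkpoint paths depend on your setup):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #     --albert_config_file ./albert_base/albert_config.json \
    #     --pytorch_dump_path ./albert_base_pytorch_model.bin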
| 165 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__ :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Optional[int]:
        model = TFConvBertModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> int:
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> List[str]:
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Dict:
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> List[Any]:
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> List[str]:
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
    def setUp( self) -> None:
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)
    def test_config( self) -> List[Any]:
self.config_tester.run_common_tests()
    def test_model( self) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended( self) -> Optional[Any]:
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Union[str, Any] = True
if hasattr(SCREAMING_SNAKE_CASE , """use_cache"""):
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length)
_lowerCamelCase : Optional[Any] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE)
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = len(model(SCREAMING_SNAKE_CASE))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , """saved_model""" , """1""")
_lowerCamelCase : str = tf.keras.models.load_model(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE)
if self.is_encoder_decoder:
_lowerCamelCase : Any = outputs["""encoder_hidden_states"""]
_lowerCamelCase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
_lowerCamelCase : Tuple = outputs["""hidden_states"""]
_lowerCamelCase : List[Any] = outputs["""attentions"""]
self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self) -> List[Any]:
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        self.assertIsNotNone(model)
    def test_attention_outputs( self) -> List[Any]:
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length)
_lowerCamelCase : int = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length)
_lowerCamelCase : List[str] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE)
def check_decoder_attentions_output(SCREAMING_SNAKE_CASE):
_lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE)
self.assertEqual(out_len % 2 , 0)
_lowerCamelCase : List[str] = outputs.decoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(SCREAMING_SNAKE_CASE):
_lowerCamelCase : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Tuple = False
_lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
_lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE)
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE)
check_encoder_attentions_output(SCREAMING_SNAKE_CASE)
if self.is_encoder_decoder:
_lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE)
check_decoder_attentions_output(SCREAMING_SNAKE_CASE)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE)
check_encoder_attentions_output(SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE))
self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE)
check_encoder_attentions_output(SCREAMING_SNAKE_CASE)
@require_tf
class lowercase__ ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self) -> Dict:
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
| 88 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class snake_case :
'''simple docstring'''
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "PIL.Image.Image"
    pa_type : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    _type : str = field(default="""Image""" , init=False , repr=False )
def __call__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.pa_type
    def encode_example( self : Tuple , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
        """simple docstring"""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self : Dict , value : dict , token_per_repo_id : Any=None ) -> "PIL.Image.Image":
        """simple docstring"""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('''::''' )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , '''rb''' , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
    def flatten( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
    def cast_storage( self : List[Any] , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
        """simple docstring"""
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                bytes_array = storage.field('''bytes''' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                path_array = storage.field('''path''' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self : Dict , storage : pa.StructArray ) -> pa.StructArray:
        """simple docstring"""
        @no_op_if_value_is_null
        def path_to_bytes(path : List[str] ):
            with xopen(path , '''rb''' ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats()-> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image : "PIL.Image.Image" )-> bytes:
    '''simple docstring'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image : "PIL.Image.Image" )-> dict:
    '''simple docstring'''
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array : np.ndarray )-> dict:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objs_to_list_of_image_dicts(objs : List[Any] )-> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
else:
return objs
else:
return objs
| 393 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCAmelCase__ ( UpperCamelCase_ , unittest.TestCase ):
__A : Any = CpmAntTokenizer
__A : Optional[Any] = False
    def setUp( self : Dict):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization( self : List[Any]):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens , jieba_tokens)
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text , normalized_text) | 717 |
def snake_case__ ( ) -> int:
    """simple docstring"""
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
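# For reference, the accepted answer to Project Euler problem 40 is 210: the
# product of the 1st, 10th, 100th, ..., 1000000th digits of Champernowne's constant.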
if __name__ == "__main__":
print(solution()) | 182 | 0 |
"""simple docstring"""
import numpy as np
def lowercase_ ( f , ya , xa , h , x_end ) -> np.ndarray:
    # Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from x = xa to
    # x = x_end with step size h and initial value y(xa) = ya.
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
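    # Hedged usage sketch: integrate y' = y on [0, 1] with y(0) = 1 and h = 0.1;
    # the final entry approximates e (about 2.71828).
    print(lowercase_(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1])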
| 299 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_A = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_A = get_tests_dir("""fixtures/vocab.json""")
_A = get_tests_dir("""fixtures""")
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp( self : Any ) -> None:
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut( self : Tuple ) -> List[Any]:
        """simple docstring"""
        processor = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(processor , WavaVecaProcessor )
    def test_processor_from_local_directory_from_model_config( self : List[Any] ) -> Dict:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            processor.save_pretrained(tmpdirname )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , WavaVecaProcessor )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , """vocab.json""" ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , WavaVecaProcessor )
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[int] = WavaVecaFeatureExtractor()
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(UpperCamelCase , UpperCamelCase )
# save in new folder
processor.save_pretrained(UpperCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """r""" ) as f:
lowerCAmelCase__ : Optional[int] = json.load(UpperCamelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write(json.dumps(UpperCamelCase ) )
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : int = WavaVecaFeatureExtractor()
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(UpperCamelCase , UpperCamelCase )
# save in new folder
processor.save_pretrained(UpperCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """r""" ) as f:
lowerCAmelCase__ : Any = json.load(UpperCamelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write(json.dumps(UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[str] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(UpperCamelCase )
# copy relevant files
copyfile(UpperCamelCase , os.path.join(UpperCamelCase , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" ) as f:
f.write("""{}""" )
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase ):
lowerCAmelCase__ : int = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase ):
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowerCAmelCase__ : str = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowerCAmelCase__ : Dict = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoFeatureExtractor.register(UpperCamelCase , UpperCamelCase )
AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase ):
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : str = CustomFeatureExtractor.from_pretrained(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """vocab.txt""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase__ : Union[str, Any] = CustomTokenizer(UpperCamelCase )
lowerCAmelCase__ : List[str] = CustomProcessor(UpperCamelCase , UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : str = AutoProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Union[str, Any] = False
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = False
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = "AutoFeatureExtractor"
_lowerCamelCase :Tuple = "AutoTokenizer"
_lowerCamelCase :Union[str, Any] = False
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoFeatureExtractor.register(UpperCamelCase , UpperCamelCase )
AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
AutoProcessor.register(UpperCamelCase , UpperCamelCase )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase__ : str = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCamelCase ( unittest.TestCase ):
_lowerCamelCase :Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowerCAmelCase ( cls : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def _lowerCAmelCase ( cls : Dict ) -> Tuple:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            # The round-tripped processor must match the original attribute for attribute.
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # Saving has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # Saving has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The modules defining the custom classes have been copied from the fixtures.
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because new_processor comes from the CustomProcessor
        # class of a dynamic module.
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
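# A sketch of how a consumer would then load the processor pushed above; the repo id and
# flow mirror the test itself, not a guaranteed API surface beyond what it asserts:
#
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
#   # trust_remote_code=True is required because CustomProcessor lives in the repo's own
#   # custom_processing.py module, referenced through the auto_map entries checked above.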
| 299 | 1 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune when the current path already overshoots, or when even taking every
    # remaining number could no longer reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
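# Expected output of the driver above, verified by hand: the subsets of
# [3, 34, 4, 12, 5, 2] summing to 9 are found in index order, so it prints
#   [3, 4, 2] [4, 5]
# The worst case is still O(2^n) subsets, but the two pruning tests cut most
# branches early for inputs like this one.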
| 701 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion over every possible first cut; exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]
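# Note on the memo table shared by the top-down variant: max_rev[k] < 0 marks "not yet
# computed" (a valid sentinel when prices are non-negative, as in main() below), so each
# subproblem is solved once and the naive O(2^n) recursion drops to O(n^2).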
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # The best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
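# A quick sanity check beyond main(), using the classic CLRS price table (values
# verified by hand, not taken from this source):
#
#   >>> bottom_up_cut_rod(4, [1, 5, 8, 9])
#   10
#
# Cutting a length-4 rod into two length-2 pieces (5 + 5) beats selling it whole (9).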
| 618 | 0 |