import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """Construct a BigBird tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # no space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
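
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): loading the
# tokenizer from the Hub and round-tripping a sentence. `from_pretrained` and
# the `google/bigbird-roberta-base` checkpoint are real; network access and an
# installed `transformers` are assumed.
#
#   from transformers import BigBirdTokenizer
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("BigBird handles long sequences.")["input_ids"]
#   # decoding with skip_special_tokens drops the wrapping [CLS]/[SEP]
#   print(tok.decode(ids, skip_special_tokens=True))
# ---------------------------------------------------------------------------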
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
A: Tuple = logging.get_logger(__name__)
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Dict ):
UpperCAmelCase : Tuple = set()
UpperCAmelCase : Optional[Any] = []
def parse_line(UpperCamelCase : Optional[Any] ):
for line in fp:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase : Dict = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCAmelCase_ ) > 0:
UpperCAmelCase : Union[str, Any] = """\n""".join(lowerCAmelCase_ )
# Only keep the warnings specified in `targets`
if any(F": {x}: " in warning for x in targets ):
selected_warnings.add(lowerCAmelCase_ )
buffer.clear()
continue
else:
UpperCAmelCase : Optional[int] = line.strip()
buffer.append(lowerCAmelCase_ )
if from_gh:
for filename in os.listdir(lowerCAmelCase_ ):
UpperCAmelCase : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if not os.path.isdir(lowerCAmelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCAmelCase_ ) as fp:
parse_line(lowerCAmelCase_ )
else:
try:
with zipfile.ZipFile(lowerCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCAmelCase_ ) as fp:
parse_line(lowerCAmelCase_ )
except Exception:
logger.warning(
F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." )
return selected_warnings
def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : Dict ):
UpperCAmelCase : Optional[int] = set()
UpperCAmelCase : str = [os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) for p in os.listdir(lowerCAmelCase_ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCAmelCase_ , lowerCAmelCase_ ) )
return selected_warnings
if __name__ == "__main__":
def _snake_case ( UpperCamelCase : Dict ):
return values.split(""",""" )
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
A: int = parser.parse_args()
A: List[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
A: Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
A: Optional[Any] = extract_warnings(args.output_dir, args.targets)
A: int = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
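
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the config
# class can be instantiated directly, since every constructor argument above
# has a default; unspecified fields keep their BEiT-base-style values.
#
#   config = BeitConfig(image_size=384)   # override a single field
#   assert config.hidden_size == 768      # defaults are kept
#   assert config.num_hidden_layers == 12
# ---------------------------------------------------------------------------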
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up key-for-key, so copy them over positionally
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)

        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def create_ngram(sentence: str, ngram_size: int) -> list:
    """
    Create character n-grams from a sentence.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    >>> create_ngram("This is short", 50)
    []
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for the DeepFloyd IF pipelines.

    Args:
        images: List of denoised PIL images of length `batch_size`, or a numpy array.
        nsfw_detected: Flags denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content; `None` if safety checking could not be performed.
        watermark_detected: Flags denoting whether the corresponding generated image likely has a
            watermark; `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test: returns True iff the Mersenne number 2**p - 1 is prime,
    for p an odd prime (p == 2 is handled as a special case).

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)  # 2**11 - 1 = 2047 = 23 * 89
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
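
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): scanning odd
# prime exponents p and keeping those for which 2**p - 1 is a Mersenne prime.
#
#   mersenne_exponents = [p for p in (3, 5, 7, 11, 13, 17, 19) if lucas_lehmer_test(p)]
#   print(mersenne_exponents)  # [3, 5, 7, 13, 17, 19] — 11 is excluded, see doctest
# ---------------------------------------------------------------------------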
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}

SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RemBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
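# Minimal usage sketch (illustrative; assumes network access to the "google/rembert"
# checkpoint referenced above):
# tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
# ids = tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]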
| 98 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
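    # Expected output: "Optimal value : 65" -- the depth-2 maxima are 90, 33, 65 and 34423,
    # the depth-1 minima are 33 and 65, and the maximizing root picks 65.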
| 284 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 5_1_2,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list)}, but is"""
                    f""" {type(additional_special_tokens)}"""
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."""
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f""" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"""
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
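# Minimal usage sketch (illustrative; assumes network access to the "google/pegasus-xsum"
# checkpoint referenced above):
# tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [5, 6, eos_token_id]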
| 181 |
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is a power of two, via the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is a power of two, via the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError('''number must not be negative''')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
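# Minimal usage sketch (an illustration, not part of this module; DDIMScheduler's
# num_train_timesteps argument and set_timesteps method are standard diffusers API):
# scheduler = DDIMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)  # pick 50 inference steps out of the 1000 training steps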
| 284 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}'
        )
        logger.info(
            """Number of resulting singleton clusters in the key """
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}'
        )

    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            """files, respectively"""
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})

        logger.info(
            name.ljust(10),
            f'Recall: {recall * 1_00:.2f}',
            f' Precision: {precision * 1_00:.2f}',
            f' F1: {f1 * 1_00:.2f}',
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({"""conll_score""": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#"""):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Sequence(datasets.Value("""string""")),
                    """references""": datasets.Sequence(datasets.Value("""string""")),
                }
            ),
            codebase_urls=["""https://github.com/ns-moosavi/coval"""],
            reference_urls=[
                """https://github.com/ns-moosavi/coval""",
                """https://www.aclweb.org/anthology/P16-1060""",
                """http://www.conll.cemantix.org/2012/data.html""",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
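# Typical use, mirroring the doctest in _KWARGS_DESCRIPTION above:
# coval = datasets.load_metric("coval")
# results = coval.compute(predictions=[words], references=[words])
# print(results["conll_score"])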
| 306 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
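# Example CLI invocation via python-fire (hypothetical tokenizer name and data path):
#   python save_len_file.py t5-small /path/to/data_dir --max_source_length 512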
| 284 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
    """Class to store the configuration of the BertAbs model."""

    model_type = """bertabs"""

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
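# Quick instantiation sketch (hypothetical override of one decoder setting):
# config = BertAbsConfig(dec_layers=8)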
| 130 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=9_9,
        hidden_size=1_6,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=2_0,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=1_6,
        word_embed_proj_dim=1_6,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, 'weight'):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, 'weight'):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=2_4,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=3_2,
            max_position_embeddings=4_8,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained('facebook/opt-350m')
        input_ids = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = 'facebook/opt-350m'

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors='tf', padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = 'facebook/opt-125m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids

            generated_ids = model.generate(input_ids, max_length=1_0)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = 'facebook/opt-350m'

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'])

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = 'facebook/opt-350m'

        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids

            generated_ids = model.generate(input_ids, max_length=1_0)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 284 | 0 |
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum of the matrix in ``filename``, moving right, up and down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 129 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
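# Net effect of the _LazyModule indirection above: importing, say, LayoutLMv3Model from this
# package only triggers the heavy torch-backed module import on first attribute access,
# while the TYPE_CHECKING branch keeps static analyzers aware of every exported symbol.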
| 284 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
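# Example invocation (hypothetical file names/paths; the script name is an assumption):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt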
if __name__ == "__main__":
    main()
 | 145 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 284 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    l1 = list(string1)
    l2 = list(string2)
    count = 0
    for i in range(len(l1)):
        if l1[i] != l2[i]:
            count += 1
            l1[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(l1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["""$"""] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = """*"""
                    check1[j] = """*"""
                    temp.append("""X""")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    l1 = list(string1)
    l2 = list(string2)
    count_n = 0
    for i in range(len(l1)):
        if l1[i] != l2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("""_""")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main():
    no_of_variable = int(input("""Enter the no. of variables\n"""))
    minterms = [
        float(x)
        for x in input(
            """Enter the decimal representation of Minterms \'Spaces Separated\'\n""").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("""Prime Implicants are:""")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("""Essential Prime Implicants are:""")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
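
# Usage sketch (illustrative): run the pipeline above non-interactively on a
# single three-variable minterm; 1.5 is the decimal representation the
# upstream doctest also uses.
example_binary = decimal_to_binary(3, [1.5])
example_pi = check(example_binary)                       # ['0.00.01.5']
example_chart = prime_implicant_chart(example_pi, example_binary)
print(selection(example_chart, example_pi))              # ['0.00.01.5']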
| 109 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    # XOR the repeating key against the ciphertext; bail out as soon as a
    # decoded byte falls outside the printable character set.
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # serialize nested GenerationConfig objects so the result is JSON-friendly
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
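
# Minimal usage sketch: the dataclass above extends TrainingArguments with
# generation-time evaluation knobs (the values below are arbitrary examples).
example_args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
assert example_args.to_dict()["generation_num_beams"] == 4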
| 151 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. 's3://') from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
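
# Usage sketch for the helpers above: strip the protocol prefix from a remote
# dataset path, and detect a non-local filesystem by its protocol.
print(extract_path_from_uri("s3://bucket/datasets/squad"))  # bucket/datasets/squad
print(is_remote_filesystem(fsspec.filesystem("memory")))    # True: not the local "file" protocol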
| 284 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
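
# Illustrative consumer of this package (a sketch, assuming the torch branch
# above succeeded): import a scheduler and prepare an inference schedule.
from diffusers.schedulers import DDIMScheduler

example_scheduler = DDIMScheduler(num_train_timesteps=1000)
example_scheduler.set_timesteps(num_inference_steps=50)
print(len(example_scheduler.timesteps))  # 50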
| 318 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
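
# Illustrative wiring (sketch): plug the callbacks above into a pl.Trainer.
# `seq2seq_module` stands for a LightningModule exposing the hparams/metrics
# attributes the callbacks read; it is a placeholder, not defined here.
checkpoint = get_checkpoint_callback(output_dir="runs", metric="rouge2")
early_stopping = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stopping])
# trainer.fit(seq2seq_module)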
| 284 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
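
# Minimal standalone sketch of the lazy-import idiom used above: attribute
# access triggers the real submodule import on first use.
import importlib
import types


class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")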
| 216 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    # remove the pegasus newline char; assign back so the substitution takes effect
    x = re.sub("<n>", "", x)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 284 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
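
# Illustrative test consuming the fixture above (hypothetical test class;
# pytest attaches the environment to the class as `self.env`).
@pytest.mark.usefixtures("sm_env")
class TestEnvironmentSmoke:
    framework = "pytorch"

    def test_defaults(self):
        assert self.env.hyperparameters["task_name"] == "mnli"
        assert "pytorch" in self.env.image_uri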
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('''doctest''').testmod()
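
# Usage examples for the converters above:
print(to_pascal_case("one two"))              # OneTwo
print(to_camel_case("one two"))               # oneTwo
print(to_snake_case("One Two", upper=False))  # one_two
print(to_kebab_case("One Two", upper=True))   # ONE-TWO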
| 181 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing both sides."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 284 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _a ( _UpperCamelCase ):
def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = 5
# Realm tok
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
lowercase__ = os.path.join(lowerCAmelCase_ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__ = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> RealmTokenizer:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=lowerCAmelCase_ , )
return block_records
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_config()
lowercase__ = self.get_dummy_retriever()
lowercase__ = retriever.tokenizer
lowercase__ = np.array([0, 3] , dtype='''long''' )
lowercase__ = tokenizer(['''Test question'''] ).input_ids
lowercase__ = tokenizer(
['''the fourth'''] , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ).input_ids
lowercase__ = config.reader_seq_len
lowercase__ , lowercase__ , lowercase__ , lowercase__ = retriever(
lowerCAmelCase_ , lowerCAmelCase_ , answer_ids=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors='''np''' )
self.assertEqual(len(lowerCAmelCase_ ) , 2 )
self.assertEqual(len(lowerCAmelCase_ ) , 2 )
self.assertEqual(len(lowerCAmelCase_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_config()
lowercase__ = self.get_dummy_retriever()
lowercase__ = retriever.tokenizer
lowercase__ = np.array([0, 3, 5] , dtype='''long''' )
lowercase__ = tokenizer(['''Test question'''] ).input_ids
lowercase__ = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ).input_ids
lowercase__ = config.reader_seq_len
lowercase__ , lowercase__ , lowercase__ , lowercase__ = retriever(
lowerCAmelCase_ , lowerCAmelCase_ , answer_ids=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , lowerCAmelCase_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowerCAmelCase_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowerCAmelCase_ )
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
lowercase__ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
lowercase__ = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
lowercase__ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(lowerCAmelCase_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' )
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = 'french fries'
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = [inputs['prompt']] * 2
__lowerCAmelCase = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
__lowerCAmelCase = image / 2 + 0.5
__lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
__lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = [round(lowerCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCAmelCase_ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0]
__lowerCAmelCase = components['vae']
__lowerCAmelCase = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )[0]
__lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any]=0 ) -> Any:
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__lowerCAmelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : List[Any] ) -> str:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Dict:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = 0
def callback_fn(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor ) -> None:
__lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__lowerCAmelCase = False
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Optional[int] ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase ( self : List[Any] ) -> Any:
__lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCAmelCase = inputs['image'].resize((5_0_4, 5_0_4) )
__lowerCAmelCase = 'timbrooks/instruct-pix2pix'
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__lowerCAmelCase = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
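
# Illustrative end-user invocation of the pipeline exercised by these tests
# (a sketch: assumes a CUDA GPU, the released checkpoint below, and a
# user-supplied PIL image `input_image`; the release class name is
# StableDiffusionInstructPix2PixPipeline).
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
edited = pipe(
    "turn him into a cyborg", image=input_image, num_inference_steps=10, image_guidance_scale=1.0
).images[0]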
| 284 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase = logging.get_logger(__name__)
class __UpperCAmelCase (_UpperCamelCase ):
__snake_case : List[str] = ["pixel_values"]
def __init__( self: List[str] , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Dict[str, int]] = None , UpperCAmelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: Union[int, float] = 1 / 255 , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , **UpperCAmelCase_: str , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 256}
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = crop_size
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Any , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCAmelCase_ , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: float , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self: Any , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: ImageInput , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[float] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , UpperCAmelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE = get_size_dict(lowerCAmelCase_ , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[Tuple] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE = target_sizes.numpy()
_SCREAMING_SNAKE_CASE = []
for idx in range(len(lowerCAmelCase_ ) ):
_SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
_SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
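
# Illustrative preprocessing call (a sketch): the class name in this row is
# mangled; judging from its defaults it appears to mirror transformers'
# MobileNetV2ImageProcessor (an inference, not stated in this row).
import numpy as np
from transformers import MobileNetV2ImageProcessor

example_processor = MobileNetV2ImageProcessor()
example_image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
example_batch = example_processor(images=example_image, return_tensors="pt")
print(example_batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])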
| 306 |
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
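
# Quick sanity checks for the two counters above:
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3  # 0b11001
assert get_set_bits_count_using_modulo_operator(37) == 3             # 0b100101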
| 284 | 0 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate an image by the affine transform mapping point triple pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image (the four point sets below are
    # paired for three demonstration rotations)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 130 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case : Dict = pytest.mark.integration
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCAmelCase_ ) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase ( self : List[str] ) -> Tuple:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
__lowerCAmelCase = dset.map(
lambda lowerCAmelCase_ , lowerCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
__lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase ( self : Optional[Any] ) -> str:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : int ) -> Optional[Any]:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : str ) -> int:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
queries = np.eye(5 , dtype=np.float32 )[::-1]
total_scores , total_indices = index.search_batch(queries )
self.assertRaises(ValueError , index.search_batch , queries[0] )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def lowercase ( self : List[Any] ) -> List[str]:
import faiss
index = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
index = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(ValueError ):
index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase ( self : Union[str, Any] ) -> Dict:
import faiss
custom_index = faiss.IndexFlat(5 )
index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase ( self : str ) -> Any:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
index = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs ( mockfs : Union[str, Any] ):
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.float32 ) )
index_name = 'index.faiss'
path = F"""mock://{index_name}"""
index.save(path, storage_options=mockfs.storage_options )
index = FaissIndex.load(path, storage_options=mockfs.storage_options )
query = np.zeros(5, dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {'acknowledged': True}
index = ElasticSearchIndex(es_client=es_client )
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
# batched queries with timeout
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries , request_timeout=3_0 )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
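# For orientation, a compact de-obfuscated sketch of the FAISS round trip the
# tests above exercise, assuming the public `datasets`/`faiss` APIs (the helper
# name and data below are illustrative, not taken from the snippet):
import numpy as np
from datasets import Dataset

def faiss_roundtrip_sketch():
    import faiss
    dset = Dataset.from_dict({"filename": [f"my_name-train_{x}" for x in range(30)]})
    # one 5-d vector per row, scaled by the row index
    dset = dset.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
    dset.add_faiss_index("vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
    scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
    assert examples["filename"][0] == "my_name-train_29"  # largest inner product wins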
| 284 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE =1_6
EVAL_BATCH_SIZE =3_2
def get_dataloaders ( accelerator : Accelerator ,batch_size : int = 16):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
datasets = load_dataset('''glue''' ,'''mrpc''')
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=True ,max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function ,batched=True ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('''label''' ,'''labels''')
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples ,padding='''longest''' ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors='''pt''' ,)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['''train'''] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size ,drop_last=True)
eval_dataloader = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE ,drop_last=(accelerator.mixed_precision == '''fp8''') ,)
return train_dataloader, eval_dataloader
def training_function ( config : Any ,args : int):
'''simple docstring'''
accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['''lr''']
num_epochs = int(config['''num_epochs'''])
seed = int(config['''seed'''])
batch_size = int(config['''batch_size'''])
metric = evaluate.load('''glue''' ,'''mrpc''')
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader , eval_dataloader = get_dataloaders(accelerator ,batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() ,lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer ,num_warmup_steps=100 ,num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
metric.add_batch(
predictions=predictions ,references=references ,)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" ,eval_metric)
def main ( ):
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Simple example of training script.''')
parser.add_argument(
'''--mixed_precision''' ,type=str ,default=None ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''')
args = parser.parse_args()
config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(config ,args)
if __name__ == "__main__":
main()
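# The training pattern above, condensed into a hedged stand-alone sketch
# (hypothetical model/optimizer/dataloader; shows only the Accelerate-specific calls):
from accelerate import Accelerator

def train_sketch(model, optimizer, train_dataloader, num_epochs=3, grad_accum=1):
    accelerator = Accelerator()
    model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
    for _ in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            loss = model(**batch).loss / grad_accum
            accelerator.backward(loss)  # instead of loss.backward(), so mixed-precision scaling works
            if step % grad_accum == 0:
                optimizer.step()
                optimizer.zero_grad()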
| 129 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def test_distribute_shards ( kwargs : Optional[Any], expected : Any ):
out = _distribute_shards(**kwargs )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def test_split_gen_kwargs ( gen_kwargs : Optional[Any], max_num_jobs : List[str], expected : Optional[int] ):
out = _split_gen_kwargs(gen_kwargs, max_num_jobs )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def test_number_of_shards_in_gen_kwargs ( gen_kwargs : int, expected : Any ):
if expected is RuntimeError:
with pytest.raises(expected ):
_number_of_shards_in_gen_kwargs(gen_kwargs )
else:
out = _number_of_shards_in_gen_kwargs(gen_kwargs )
assert out == expected
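# A re-implementation sketch of the contract the parametrized cases above
# encode (not the library's own `_distribute_shards`; written only to make
# the expected outputs concrete):
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list[range]:
    jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, jobs) if jobs else (0, 0)
    out, start = [], 0
    for j in range(jobs):
        size = base + (1 if j < extra else 0)  # spread the remainder over the first jobs
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]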
| 284 | 0 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy ( preds: Any, labels: Tuple ):
return float((preds == labels).mean() )
def acc_and_fa ( preds: str, labels: List[str] ):
acc = simple_accuracy(preds, labels )
fa = float(f1_score(y_true=labels, y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def precision_at_10 ( en_sentvecs: Optional[int], in_sentvecs: Any ):
en_sentvecs = np.array(en_sentvecs )
in_sentvecs = np.array(in_sentvecs )
n = en_sentvecs.shape[0]
# mean centering
en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0 )
in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0 )
sim = cdist(en_sentvecs, in_sentvecs, "cosine" )
actual = np.array(range(n ) )
preds = sim.argsort(axis=1 )[:, :10]
matches = np.any(preds == actual[:, None], axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
"""simple docstring"""
def _info ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def _compute ( self : Dict , predictions : Tuple , references : Optional[int] ) -> Dict:
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_10(predictions , references )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" ) | 145 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
a_ = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
__lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase ( self : List[Any] , lowerCAmelCase_ : str ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__lowerCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
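# For context, the greedy longest-match-first WordPiece lookup these vocabulary
# tests depend on, sketched in isolation (simplified: no max-chars-per-word
# limit; the vocab and input reuse tokens from the fixtures above):
def wordpiece_sketch(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:  # nothing in the vocab matched this position
            return [unk]
        start = end
    return tokens

print(wordpiece_sketch("こんばんは", {"こん", "##ばんは", "こんにちは"}))  # ['こん', '##ばんは']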
| 284 | 0 |
"""simple docstring"""
def cramers_rule_2x2 ( equation1 : list[int] , equation2 : list[int] ):
# Check if the input is valid
if not len(equation1 ) == len(equation2 ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
raise ValueError("""Both a & b of two equations can\'t be zero.""" )
# Extract the coefficients
a1 , b1 , c1 = equation1
a2 , b2 , c2 = equation2
# Calculate the determinants of the matrices
determinant = a1 * b2 - a2 * b1
determinant_x = c1 * b2 - c2 * b1
determinant_y = a1 * c2 - a2 * c1
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Consistent system)
return (0.0, 0.0)
else:
x = determinant_x / determinant
y = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
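# Quick numeric check of the solver above on 2x + 3y = 8 and x - y = -1,
# whose unique solution is x = 1, y = 2:
print(cramers_rule_2x2([2, 3, 8], [1, -1, -1]))  # (1.0, 2.0)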
| 109 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """beit"""
def __init__( self : List[Any] , vocab_size : Tuple=8_1_9_2 , hidden_size : Optional[int]=7_6_8 , num_hidden_layers : int=1_2 , num_attention_heads : Optional[int]=1_2 , intermediate_size : Any=3_0_7_2 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Any=0.0 , attention_probs_dropout_prob : Any=0.0 , initializer_range : Any=0.02 , layer_norm_eps : int=1e-12 , image_size : int=2_2_4 , patch_size : str=1_6 , num_channels : int=3 , use_mask_token : Dict=False , use_absolute_position_embeddings : int=False , use_relative_position_bias : List[Any]=False , use_shared_relative_position_bias : int=False , layer_scale_init_value : List[str]=0.1 , drop_path_rate : Union[str, Any]=0.1 , use_mean_pooling : List[str]=True , out_indices : List[Any]=[3, 5, 7, 1_1] , pool_scales : Optional[Any]=[1, 2, 3, 6] , use_auxiliary_head : Tuple=True , auxiliary_loss_weight : Dict=0.4 , auxiliary_channels : Tuple=2_5_6 , auxiliary_num_convs : Any=1 , auxiliary_concat_input : Any=False , semantic_loss_ignore_index : Optional[int]=2_5_5 , **kwargs : Any , ) -> Dict:
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.out_indices = out_indices
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
def lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : Optional[Any] ) -> float:
return 1e-4
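# For context: the `inputs` mapping above marks every axis of `pixel_values`
# as dynamic. A hedged sketch of how an exporter would consume it (the
# torch.onnx call below is illustrative and left commented out):
# dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
# torch.onnx.export(model, dummy_pixel_values, "beit.onnx",
#                   input_names=["pixel_values"], dynamic_axes=dynamic_axes)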
| 284 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
'''simple docstring'''
def __init__( self : Dict , parent : Dict , batch_size : int=13 , seq_length : Any=7 , is_training : Optional[Any]=True , use_input_mask : Tuple=True , use_token_type_ids : List[Any]=True , use_labels : Dict=True , vocab_size : Tuple=99 , embedding_size : int=16 , hidden_size : List[str]=36 , num_hidden_layers : Optional[int]=6 , num_hidden_groups : Tuple=6 , num_attention_heads : Dict=6 , intermediate_size : List[str]=37 , hidden_act : Dict="gelu" , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : List[str]=512 , type_vocab_size : Any=16 , type_sequence_label_size : List[Any]=2 , initializer_range : List[Any]=0.02 , num_labels : str=3 , num_choices : Dict=4 , scope : Union[str, Any]=None , ) -> Union[str, Any]:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Dict = None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : int = None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Optional[int]:
UpperCAmelCase : List[Any] = AlbertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
UpperCAmelCase : List[str] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> Optional[Any]:
UpperCAmelCase : Dict = AlbertForPreTraining(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : str = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , sentence_order_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCAmelCase_ ( self : str , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ) -> Any:
UpperCAmelCase : List[Any] = AlbertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : int = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Any ) -> Tuple:
UpperCAmelCase : Any = AlbertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Any ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : Any = AlbertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : Dict = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> str:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : Optional[Any] = AlbertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : Tuple = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
UpperCAmelCase : Optional[int] = self.num_choices
UpperCAmelCase : str = AlbertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Dict = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : List[Any] = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Dict = True
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict=False ) -> Dict:
UpperCAmelCase : str = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
UpperCAmelCase : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : List[Any] = AlbertModelTester(self )
UpperCAmelCase : str = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : int ) -> str:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : Any = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> int:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = AlbertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : Optional[int] = AlbertModel.from_pretrained('albert-base-v2' )
UpperCAmelCase : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCAmelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
UpperCAmelCase : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) )
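# The multiple-choice reshaping trick used by the tester above, in isolation:
# each (batch, seq_len) tensor is duplicated once per answer choice.
import torch

batch, num_choices, seq_len = 2, 4, 7
input_ids = torch.randint(0, 99, (batch, seq_len))
mc_input_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert mc_input_ids.shape == (batch, num_choices, seq_len)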
| 151 |
def create_ngram ( sentence : str, ngram_size : int ):
return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
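# Example: character trigrams of a short string using the function above.
print(create_ngram("abcde", 3))  # ['abc', 'bcd', 'cde']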
| 284 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Any = True
except (ImportError, ModuleNotFoundError):
__lowercase : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence ( x ) -> str:
'''simple docstring'''
x = re.sub('''<n>''' , '''''' , x ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(x ) )
| 318 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """pegasus"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , vocab_size : Union[str, Any]=5_0_2_6_5 , max_position_embeddings : Union[str, Any]=1_0_2_4 , encoder_layers : Union[str, Any]=1_2 , encoder_ffn_dim : Dict=4_0_9_6 , encoder_attention_heads : str=1_6 , decoder_layers : List[Any]=1_2 , decoder_ffn_dim : Union[str, Any]=4_0_9_6 , decoder_attention_heads : Union[str, Any]=1_6 , encoder_layerdrop : Any=0.0 , decoder_layerdrop : Optional[int]=0.0 , use_cache : List[Any]=True , is_encoder_decoder : Any=True , activation_function : Dict="gelu" , d_model : Optional[int]=1_0_2_4 , dropout : List[str]=0.1 , attention_dropout : str=0.0 , activation_dropout : Union[str, Any]=0.0 , init_std : Optional[int]=0.02 , decoder_start_token_id : Tuple=0 , scale_embedding : Tuple=False , pad_token_id : Union[str, Any]=0 , eos_token_id : int=1 , forced_eos_token_id : Tuple=1 , **kwargs : Tuple , ) -> List[str]:
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
def lowercase ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def lowercase ( self : Optional[Any] ) -> int:
return self.d_model
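# Illustrative sketch (the values below are placeholders, not model defaults):
# the two properties above let generic code read the common names, e.g.
# config = _UpperCAmelCase(d_model=768, encoder_attention_heads=12)
# config.hidden_size -> 768, config.num_attention_heads -> 12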
| 284 | 0 |
import cv2 as cva
import numpy as np
class HarrisCorner:
    def __init__(self , k : float , window_size : int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__(self ):
        return str(self.k )

    def detect(self , img_path : str ):
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: shadows self.k, as in the upstream implementation
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_5_5 )
        return color_img, corner_list
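# Why this works: R = det(M) - k * trace(M)^2 over the windowed structure tensor
# M = [[wxx, wxy], [wxy, wyy]] is large and positive only when both eigenvalues
# are large (a corner); an edge gives one dominant eigenvalue and R < 0, while a
# flat window gives R close to 0.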
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
| 216 |
def lucas_lehmer_test ( p : int ) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
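# Lucas-Lehmer tests Mersenne numbers M_p = 2**p - 1: with s_0 = 4 and
# s_i = (s_{i-1}**2 - 2) mod M_p, M_p is prime iff s_{p-2} == 0. Hence the
# prints below: M_7 = 127 is prime (True), M_11 = 2047 = 23 * 89 is not (False).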
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 284 | 0 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax ( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
UpperCAmelCase__ = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCAmelCase__ = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index )}'''
# Self-Attention
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
UpperCAmelCase__ = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index )]['layer']
UpperCAmelCase__ = tax_attention_key
UpperCAmelCase__ = tax_attention_out
UpperCAmelCase__ = tax_attention_query
UpperCAmelCase__ = tax_attention_value
UpperCAmelCase__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ = tax_global_layer_norm
if split_mlp_wi:
UpperCAmelCase__ = tax_mlp_wi_a
UpperCAmelCase__ = tax_mlp_wi_a
else:
UpperCAmelCase__ = tax_mlp_wi
UpperCAmelCase__ = tax_mlp_wo
UpperCAmelCase__ = tax_mlp_layer_norm
UpperCAmelCase__ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCAmelCase__ = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
UpperCAmelCase__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCAmelCase__ = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
UpperCAmelCase__ = tax_encoder_global_rel_embedding
# Assigning
UpperCAmelCase__ = tax_model['target']['encoder']['encoder_norm']['scale']
UpperCAmelCase__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index )}'''
# Self-Attention
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
UpperCAmelCase__ = tax_enc_dec_attention_module['key']['kernel']
UpperCAmelCase__ = tax_enc_dec_attention_module['out']['kernel']
UpperCAmelCase__ = tax_enc_dec_attention_module['query']['kernel']
UpperCAmelCase__ = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
UpperCAmelCase__ = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index )]['layer']
UpperCAmelCase__ = tax_attention_key
UpperCAmelCase__ = tax_attention_out
UpperCAmelCase__ = tax_attention_query
UpperCAmelCase__ = tax_attention_value
UpperCAmelCase__ = tax_pre_attention_layer_norm
UpperCAmelCase__ = tax_enc_dec_attention_key
UpperCAmelCase__ = tax_enc_dec_attention_out
UpperCAmelCase__ = tax_enc_dec_attention_query
UpperCAmelCase__ = tax_enc_dec_attention_value
UpperCAmelCase__ = tax_cross_layer_norm
if split_mlp_wi:
UpperCAmelCase__ = tax_mlp_wi_a
UpperCAmelCase__ = tax_mlp_wi_a
else:
UpperCAmelCase__ = tax_mlp_wi
UpperCAmelCase__ = tax_mlp_wo
UpperCAmelCase__ = txa_mlp_layer_norm
UpperCAmelCase__ = flax_model_decoder_layer_block
# Decoder Normalization
UpperCAmelCase__ = tax_model['target']['decoder']['decoder_norm']['scale']
UpperCAmelCase__ = txa_decoder_norm
# Only for layer 0:
UpperCAmelCase__ = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
UpperCAmelCase__ = tax_decoder_rel_embedding
# Token Embeddings
UpperCAmelCase__ = tax_model['target']['token_embedder']['embedding']
UpperCAmelCase__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCAmelCase__ = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
lowerCAmelCase__ : Tuple = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
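# Typical invocation (all paths and the config name below are placeholders):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /tmp/t5x_checkpoint \
#     --config_name google/t5-v1_1-small \
#     --flax_dump_folder_path /tmp/flax_model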
| 98 |
from __future__ import annotations
import math
def minimax ( depth : int, node_index : int, is_max : bool, scores : list[int], height : float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )


def main ( ):
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ), 2 )
    print('Optimal value : ', end='' )
    print(minimax(0, 0, True, scores, height ) )
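# Worked example for the scores above: the leaves pair up under the maximizer
# into (90, 33, 65, 34423), the minimizer keeps (33, 65), and the root maximizer
# picks 65, so the script prints "Optimal value : 65".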
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 284 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
    def test_tpu( self ):
        '''simple docstring'''
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 181 |
def is_power_of_two ( number : int ) -> bool:
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
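# Bit trick: a power of two has a single set bit, so number & (number - 1)
# clears it and leaves 0. Note that 0 also satisfies the test; callers that
# must exclude 0 should additionally check number != 0.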
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy ( preds , labels ):
"""simple docstring"""
return (preds == labels).mean()
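# e.g. simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) compares the
# label vectors elementwise and averages the booleans, giving 2/3.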
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
| 110 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 284 | 0 |
from __future__ import annotations
UpperCamelCase = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex: str ) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'->{target_vertex}'
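# Because BFS visits vertices in order of edge distance from the source, walking
# parent pointers back from any reachable vertex yields a shortest path; with
# source 'G' below, shortest_path('D') returns 'G->C->A->B->D'.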
if __name__ == "__main__":
UpperCamelCase = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 306 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file ( tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs ):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens ):
                    max_lens.append(max(src, tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens, train_ds.len_file )
    pickle_save(val_lens, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 284 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict ( config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """simple docstring"""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=1_6 , word_embed_proj_dim=1_6 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFOPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , 'weight' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , 'weight' ):
                    return embedding_layer.weight
                else:
                    return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
__lowerCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
__lowerCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return tf.constant(lowerCAmelCase_, dtype=tf.intaa )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 284 | 0 |
def solution ( n : int = 2000000) -> int:
    '''simple docstring'''
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 ,int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i ,n + 1 ,i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
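# Sieve of Eratosthenes, O(n log log n): marking starts at i * i because every
# smaller multiple of i was already struck out by a smaller prime factor.
# Spot check: solution(10) == 2 + 3 + 5 + 7 == 17.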
if __name__ == "__main__":
print(f"""{solution() = }""")
| 129 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
'''simple docstring'''
import math
def is_prime ( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution ( nth: int = 10_001 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
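# The 6k +/- 1 trial division above skips two thirds of the candidates; as a
# spot check, solution(6) returns 13, the sixth prime (2, 3, 5, 7, 11, 13).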
if __name__ == "__main__":
print(f'{solution() = }') | 145 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 284 | 0 |
"""simple docstring"""
def capitalized_variants ( txt : str ) -> list[str]:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
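# e.g. capitalized_variants('ab1c') -> ['Ab1c', 'aB1c', 'ab1C']: each alphabetic
# position is upper-cased once while the rest of the string is left unchanged.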
if __name__ == "__main__":
__import__("doctest").testmod()
| 109 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key ( ciphertext : list[int], key : tuple[int, ...] ) -> str | None:
    decoded = ''
    for keychar, cipherchar in zip(cycle(key ), ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded


def filter_valid_chars ( ciphertext : list[int] ) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3 ):
        encoded = try_key(ciphertext, key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles


def filter_common_word ( possibles : list[str], common_word : str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution ( filename : str = "p059_cipher.txt" ) -> int:
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='utf-8' )
    ciphertext = [int(number ) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
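# Project Euler 59 in brief: XOR is self-inverse, so cycling each of the 26**3
# lowercase three-letter keys over the ciphertext decrypts it; a candidate is
# kept only if every decoded byte is printable, then the pool is narrowed by
# requiring common English words until a single plaintext remains.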
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase__ = logging.getLogger(__name__)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ):
UpperCAmelCase : Optional[int] = bnb_quantization_config.load_in_abit
UpperCAmelCase : Any = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
UpperCAmelCase : Optional[Any] = []
# custom device map
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(device_map.keys() ) > 1:
UpperCAmelCase : Union[str, Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase : List[Any] = get_keys_to_not_convert(lowerCAmelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCAmelCase_ )
UpperCAmelCase : List[str] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase : Any = []
UpperCAmelCase : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCAmelCase_ )
# compatibility with peft
UpperCAmelCase : Optional[Any] = load_in_abit
UpperCAmelCase : Tuple = load_in_abit
UpperCAmelCase : List[Any] = get_parameter_device(lowerCAmelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
UpperCAmelCase : Tuple = replace_with_bnb_layers(lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
# convert param to the right dtype
UpperCAmelCase : Tuple = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase : int = name.replace('.weight' , '' ).replace('.bias' , '' )
UpperCAmelCase : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase_ ):
param.to(lowerCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
UpperCAmelCase : str = replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
UpperCAmelCase : Any = get_quantized_model_device_map(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_memory=lowerCAmelCase_ , no_split_module_classes=lowerCAmelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Union[str, Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase : Optional[Any] = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
UpperCAmelCase : Tuple = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase : List[str] = {}
UpperCAmelCase : str = special_dtypes
UpperCAmelCase : str = no_split_module_classes
UpperCAmelCase : Optional[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase : Tuple = get_balanced_memory(
lowerCAmelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase : Tuple = max_memory
UpperCAmelCase : int = infer_auto_device_map(lowerCAmelCase_ , **lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# check if don't have any quantized module on the cpu
UpperCAmelCase : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if modules_to_not_convert is None:
UpperCAmelCase : List[Any] = []
UpperCAmelCase , UpperCAmelCase : str = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
UpperCAmelCase : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase : str = []
current_key_name.append(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase : List[str] = '.'.join(lowerCAmelCase_ )
UpperCAmelCase : Dict = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase : str = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase : Dict = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase : Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
UpperCAmelCase : Union[str, Any] = module.weight.data
if module.bias is not None:
UpperCAmelCase : Optional[Any] = module.bias.data
bnb_module.requires_grad_(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase , UpperCAmelCase : Dict = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase : str = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase( UpperCAmelCase_ ):
# Create a copy of the model
with init_empty_weights():
UpperCAmelCase : int = deepcopy(lowerCAmelCase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCAmelCase : Any = find_tied_parameters(lowerCAmelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase : int = sum(lowerCAmelCase_ , [] )
UpperCAmelCase : Tuple = len(lowerCAmelCase_ ) > 0
# Check if it is a base model
UpperCAmelCase : Tuple = False
if hasattr(lowerCAmelCase_ , 'base_model_prefix' ):
UpperCAmelCase : List[str] = not hasattr(lowerCAmelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase : str = list(model.named_children() )
UpperCAmelCase : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase : Optional[int] = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
UpperCAmelCase : Tuple = list(set(lowerCAmelCase_ ) ) + list(lowerCAmelCase_ )
# remove ".weight" from the keys
UpperCAmelCase : int = ['.weight', '.bias']
UpperCAmelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase : Optional[int] = name.replace(lowerCAmelCase_ , '' )
filtered_module_names.append(lowerCAmelCase_ )
return filtered_module_names
def has_abit_bnb_layers( model ):
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def get_parameter_device( parameter ):
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fp16_statistics is None:
set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
tensor_name = param_name
module = model
if "." in tensor_name:
splits = tensor_name.split('.' )
for split in splits[:-1]:
new_module = getattr(module , split )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
module = new_module
tensor_name = splits[-1]
# offload weights
module._parameters[tensor_name].requires_grad = False
offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
else:
offload_weight(param , param_name , offload_folder , index=offload_index )
offload_weight(fp16_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
set_module_tensor_to_device(model , param_name , 'meta' , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 151 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path: str ) -> str:
if "://" in dataset_path:
dataset_path = dataset_path.split('://' )[1]
return dataset_path
def is_remote_filesystem( fs: fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs: fsspec.AbstractFileSystem, src: str, dst: str ):
is_local = not is_remote_filesystem(fs )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src ), fs._strip_protocol(dst ) )
else:
fs.mv(src, dst, recursive=True )
def _reset_fsspec_lock() -> None:
if hasattr(fsspec.asyn, 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
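# Quick usage sketch for the helpers above. "s3://bucket/data" is a made-up
# example URI; fsspec.filesystem("file") returns the local filesystem, so the
# remote check should report False for it.
import fsspec

print(extract_path_from_uri("s3://bucket/data"))  # -> "bucket/data"
print(extract_path_from_uri("/local/data"))       # -> unchanged, no protocol
local_fs = fsspec.filesystem("file")
print(is_remote_filesystem(local_fs))             # -> False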
| 284 | 0 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day( year: int , month: int , day: int ) -> str:
"""Returns the week-day name for a given date, e.g. get_week_day(2020, 10, 24) == 'Saturday'."""
assert len(str(year ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
century = year // 100
century_anchor = (5 * (century % 4) + 2) % 7
centurian = year % 100
centurian_m = centurian % 12
dooms_day = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
day_anchor = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
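# Worked example (dates checked against a calendar): 2023's century anchor is
# Tuesday (2) and its doomsday is also Tuesday, so January 1, 2023 resolves to
# (2 + 1 - 3) % 7 == 0 -> 'Sunday'.
assert get_week_day(2023, 1, 1) == "Sunday"
assert get_week_day(2020, 10, 24) == "Saturday"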
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
params = sum([np.prod(p.size() ) for p in model_parameters] )
return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
if metric == "rouge2":
exp = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
exp = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
exp = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
checkpoint_callback = ModelCheckpoint(
dirpath=output_dir, filename=exp, monitor=F"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
return EarlyStopping(
monitor=F"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback( pl.Callback ):
def on_batch_end(self , trainer , pl_module ):
lrs = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _write_logs(self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
od = Path(pl_module.hparams.output_dir )
if type_path == "test":
results_file = od / 'test_results.txt'
generations_file = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=True )
generations_file.parent.mkdir(exist_ok=True )
with open(results_file , 'a+' ) as writer:
for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val , torch.Tensor ):
val = val.item()
msg = f"""{key}: {val:.6f}\n"""
writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
content = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(content )
@rank_zero_only
def on_train_start(self , trainer , pl_module ):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def on_test_end(self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(trainer , pl_module , 'test' )
@rank_zero_only
def on_validation_end(self , trainer: pl.Trainer , pl_module ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 284 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowercase__ ='\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
lowercase__ ='\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
lowercase__ ='\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Bleu( datasets.Metric ):
def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _compute(self , predictions , references , max_order=4 , smooth=False ):
score = compute_bleu(
reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 216 |
import re
from filelock import FileLock
try:
import nltk
NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence( x: str ) -> str:
x = re.sub('<n>', '', x )  # remove pegasus newline char (re.sub returns a new string, so re-assign)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(x ) )
| 284 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
parser = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=lowerCAmelCase_ , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=lowerCAmelCase_ , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=lowerCAmelCase_ , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=lowerCAmelCase_ , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=lowerCAmelCase_ , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=lowerCAmelCase_ , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=lowerCAmelCase_ , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=lowerCAmelCase_ , default=2**1_8 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=lowerCAmelCase_ , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=lowerCAmelCase_ , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=lowerCAmelCase_ , default=1e-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=lowerCAmelCase_ , default=1e-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=lowerCAmelCase_ , default=5_1_2 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=lowerCAmelCase_ , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=lowerCAmelCase_ , help='Model ID to upload to on the Hugging Face Hub.' )
args = parser.parse_args()
return args
def initialize_tpu( args ):
try:
if args.tpu_name:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(tpu )
tf.tpu.experimental.initialize_tpu_system(tpu )
return tpu
def count_samples( file_list ):
num_samples = 0
for file in file_list:
filename = file.split('/' )[-1]
sample_count = re.search(r'-\d+-(\d+)\.tfrecord' , filename ).group(1 )
sample_count = int(sample_count )
num_samples += sample_count
return num_samples
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
num_samples = count_samples(records )
dataset = tf.data.Dataset.from_tensor_slices(records )
if shuffle:
dataset = dataset.shuffle(len(dataset ) )
dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
if shuffle:
assert shuffle_buffer_size is not None
dataset = dataset.shuffle(shuffle_buffer_size )
dataset = dataset.batch(batch_size , drop_remainder=True )
dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
dataset = dataset.prefetch(AUTO )
return dataset
def main( args ):
if not args.no_tpu:
tpu = initialize_tpu(args )
strategy = tf.distribute.TPUStrategy(tpu )
else:
strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
config = AutoConfig.from_pretrained(args.pretrained_model_config )
config.vocab_size = tokenizer.vocab_size
training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
num_train_samples = count_samples(training_records )
steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
total_train_steps = steps_per_epoch * args.num_epochs
with strategy.scope():
model = TFAutoModelForMaskedLM.from_config(config )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer , schedule = create_optimizer(
num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=optimizer , metrics=['accuracy'] )
def decode_fn(example ):
features = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(example , features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='tf' )
def mask_with_collator(batch ):
# TF really needs an isin() function
special_tokens_mask = (
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
batch['input_ids'] , batch['labels'] = data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
return batch
batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
train_dataset = prepare_dataset(
training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
eval_dataset = prepare_dataset(
eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
callbacks = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
model.fit(
train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
args = parse_args()
main(args)
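# Example invocation as a subprocess (the script name "run_mlm.py", the bucket
# paths, and the output directory are illustrative placeholders, not from the
# original file):
import subprocess

subprocess.run(
    [
        "python", "run_mlm.py",
        "--pretrained_model_config", "roberta-base",
        "--tokenizer", "unigram-tokenizer-wikitext",
        "--train_dataset", "gs://my-bucket/train",
        "--eval_dataset", "gs://my-bucket/eval",
        "--output_dir", "./mlm-checkpoints",
        "--num_epochs", "1",
        "--bfloat16",
    ],
    check=True,
)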
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
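# Both __init__ modules above follow the same lazy-import pattern: names are
# declared up front, but the heavy submodules are only imported on first
# attribute access. A minimal standalone sketch of the idea (an
# assumption-level re-implementation, not the transformers _LazyModule):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._name_to_module[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule is imported only once
        return value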
| 181 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric( datasets.Metric ):
"""Accuracy metric for the MATH dataset."""
def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def _compute(self , predictions , references ):
n_correct = 0.0
for pred, ref in zip(predictions , references ):
n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 284 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig( PretrainedConfig ):
model_type = '''codegen'''
attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=50_400 , n_ctx=2_048 , n_positions=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function='''gelu_new''' , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig( OnnxConfigWithPast ):
def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
if not getattr(self._config , '''pad_token_id''' , None ):
# TODO: how to do that better?
self._config.pad_token_id = 0
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def num_layers( self ) -> int:
return self._config.n_layer
@property
def num_attention_heads( self ) -> int:
return self._config.n_head
def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
batch , seqlen = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
past_shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
ordered_inputs['''past_key_values'''] = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
]
ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
if self.use_past:
mask_dtype = ordered_inputs['''attention_mask'''].dtype
ordered_inputs['''attention_mask'''] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
def default_onnx_opset( self ) -> int:
return 13
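# Quick usage sketch: build a deliberately tiny config (the small numbers are
# illustrative; the real defaults are above) and wrap it in the ONNX config.
# attribute_map aliases hidden_size to n_embd.
config = CodeGenConfig(n_layer=4, n_head=4, n_embd=256)
onnx_config = CodeGenOnnxConfig(config)
print(config.hidden_size)      # 256, resolved through attribute_map
print(onnx_config.num_layers)  # 4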
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPix2PixPipeline,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionInstructPix2PixPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
scheduler = PNDMScheduler(skip_prk_steps=True )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def test_stable_diffusion_pix2pix_default_case(self ):
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPix2PixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = sd_pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
expected_slice = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_negative_prompt(self ):
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPix2PixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
negative_prompt = 'french fries'
output = sd_pipe(**inputs , negative_prompt=negative_prompt )
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
expected_slice = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_multiple_init_images(self ):
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionInstructPix2PixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['prompt'] = [inputs['prompt']] * 2
image = np.array(inputs['image'] ).astype(np.float32 ) / 2_55.0
image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
image = image / 2 + 0.5
image = image.permute(0 , 3 , 1 , 2 )
inputs['image'] = image.repeat(2 , 1 , 1 , 1 )
image = sd_pipe(**inputs ).images
image_slice = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
expected_slice = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_euler(self ):
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components['scheduler'] = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
sd_pipe = StableDiffusionInstructPix2PixPipeline(**components )
sd_pipe = sd_pipe.to(device )
sd_pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = sd_pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
expected_slice = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_inference_batch_single_identical(self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def test_latents_input(self ):
components = self.get_dummy_components()
pipe = StableDiffusionInstructPix2PixPipeline(**components )
pipe.image_processor = VaeImageProcessor(do_resize=False , do_normalize=False )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
out = pipe(**self.get_dummy_inputs_by_type(torch_device , input_image_type='pt' ) )[0]
vae = components['vae']
inputs = self.get_dummy_inputs_by_type(torch_device , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
out_latents_inputs = pipe(**inputs )[0]
max_diff = np.abs(out - out_latents_inputs ).max()
self.assertLess(max_diff , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests( unittest.TestCase ):
def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self , seed=0 ):
generator = torch.manual_seed(seed )
image = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
inputs = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def test_stable_diffusion_pix2pix_default(self ):
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
expected_slice = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_k_lms(self ):
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
expected_slice = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_ddim(self ):
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None )
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
expected_slice = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def test_stable_diffusion_pix2pix_intermediate_state(self ):
number_of_steps = 0
def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=None , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs()
_ = pipe(**inputs )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self ):
inputs = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
inputs['image'] = inputs['image'].resize((5_0_4, 5_0_4) )
model_id = 'timbrooks/instruct-pix2pix'
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
model_id , safety_checker=None , )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
output = pipe(**inputs )
image = output.images[0]
image_slice = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
expected_slice = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 284 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig( PretrainedConfig ):
model_type = "data2vec-vision"
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.out_indices = out_indices
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
torch_onnx_minimum_version = version.parse("1.11" )
@property
def inputs( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation( self ):
return 1E-4
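# Usage sketch: the ONNX config above declares one dynamically-shaped
# "pixel_values" input, and atol_for_validation is the tolerance used when
# comparing ONNX and PyTorch outputs after export.
cfg = Data2VecVisionConfig()
onnx_cfg = Data2VecVisionOnnxConfig(cfg)
print(dict(onnx_cfg.inputs))         # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(onnx_cfg.atol_for_validation)  # 1e-04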
| 306 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int ) -> int:
if number < 0:
raise ValueError('the value of input must not be negative' )
result = 0
while number:
number &= number - 1
result += 1
return result
def get_set_bits_count_using_modulo_operator( number: int ) -> int:
if number < 0:
raise ValueError('the value of input must not be negative' )
result = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
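# Worked example: 25 is 0b11001, so both counters report 3 set bits.
# Kernighan's n &= n - 1 clears the lowest set bit per iteration:
# 11001 -> 11000 -> 10000 -> 00000, i.e. three iterations.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3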
def benchmark() -> None:
def do_benchmark(number: int ) -> None:
setup = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""", setup=setup )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
timing = timeit(
F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=setup, )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(number )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 284 | 0 |
def get_data( source_data: list ) -> list:
# transpose rows of raw values into one list per column
data_lists = []
for data in source_data:
for i, el in enumerate(data ):
if len(data_lists ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(el ) )
return data_lists
def calculate_each_score( data_lists: list , weights: list ) -> list:
score_lists = []
for dlist, weight in zip(data_lists , weights ):
mind = min(dlist )
maxd = max(dlist )
score = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
msg = F"""Invalid weight of {weight:f} provided"""
raise ValueError(msg )
score_lists.append(score )
return score_lists
def generate_final_scores( score_lists: list ) -> list:
final_scores = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(slist ):
final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity( source_data: list , weights: list ) -> list:
# weights: 0 if lower values should score higher, 1 if higher values should
data_lists = get_data(source_data )
score_lists = calculate_each_score(data_lists , weights )
final_scores = generate_final_scores(score_lists )
# append scores to source data
for i, ele in enumerate(final_scores ):
source_data[i].append(ele )
return source_data
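# Worked example with made-up rows of [age, salary, year]: weight 1 rewards
# higher values, weight 0 rewards lower ones; each row gains a combined score.
people = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
for row in procentual_proximity(people, [0, 1, 1]):
    print(row)  # last element is the final score, here in [0, 3]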
| 130 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
def _create_dummy_dataset(self ) -> Dataset:
dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase ( self : List[str] ) -> Tuple:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
__lowerCAmelCase = dset.map(
lambda lowerCAmelCase_ , lowerCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
__lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase ( self : Optional[Any] ) -> str:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : int ) -> Optional[Any]:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
mocked_index_create.return_value = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=es_client )
scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
def lowercase ( self : str ) -> int:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 1_0 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def lowercase ( self : List[Any] ) -> List[str]:
import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase ( self : Union[str, Any] ) -> Dict:
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase ( self : str ) -> Any:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( mockfs : Union[str, Any] ):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = F"""mock://{index_name}"""
    index.save(path, storage_options=mockfs.storage_options )
    index = FaissIndex.load(path, storage_options=mockfs.storage_options )
    query = np.zeros(5, dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=3_0 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=3_0 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 284 | 0 |
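The row above exercises the `datasets` search API end to end: attach a FAISS index to a vector column, query it, and serialize it. A minimal sketch of the same workflow, assuming `datasets` and `faiss-cpu` are installed (names illustrative, not the test fixture itself):

import faiss
import numpy as np
from datasets import Dataset

dset = Dataset.from_dict({"filename": [f"my_name-train_{i}" for i in range(30)]})
# attach one 5-d vector per row; row i holds the constant vector i * ones(5)
dset = dset.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
dset.add_faiss_index(column="vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
# inner-product search: the all-ones query scores highest against row 29
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
print(examples["filename"])  # ['my_name-train_29']
dset.drop_index("vecs")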
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 |
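The row above is transformers' standard lazy-init pattern: submodules load only when one of their exports is first accessed, so `import transformers` stays cheap even with torch-backed models registered. A stripped-down sketch of the same idea in plain importlib (illustrative, not the library's actual `_LazyModule`):

import importlib

class LazyModule:
    """Resolve exported names to their defining submodule on first attribute access."""

    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        # the real implementation converts an unknown name into AttributeError
        submodule = self._symbol_to_module[name]
        module = importlib.import_module(f".{submodule}", self._package)
        return getattr(module, name)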
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def a_ ( kwargs : Optional[Any], expected : Any ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def a_ ( gen_kwargs : Optional[Any], max_num_jobs : List[str], expected : Optional[int] ):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def a_ ( gen_kwargs : int, expected : Any ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 284 | 0 |
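A worked example of the behaviour pinned down by the parametrized cases above. This assumes a `datasets` checkout that still exposes these private helpers; their semantics are inferred from the expected values in the tests:

from datasets.utils.sharding import _distribute_shards, _split_gen_kwargs

# 10 shards over 3 jobs: earlier jobs absorb the remainder
print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]

# list-valued gen_kwargs are split per job, scalar values are copied to every job
print(_split_gen_kwargs({"shards": [0, 1, 2, 3], "foo": 0}, max_num_jobs=2))
# [{'shards': [0, 1], 'foo': 0}, {'shards': [2, 3], 'foo': 0}]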
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 145 |
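Both lazy-init rows gate their heavy exports on backend availability. The probe itself reduces to a `find_spec` check; a minimal sketch of the same guard (names illustrative; checking for `PIL` is roughly what `is_vision_available()` does):

import importlib.util

def backend_available(name: str) -> bool:
    # True iff the package could be imported, without actually importing it
    return importlib.util.find_spec(name) is not None

_import_structure = {"configuration_bridgetower": ["BridgeTowerConfig"]}
if backend_available("PIL"):
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
if backend_available("torch"):
    _import_structure["modeling_bridgetower"] = ["BridgeTowerModel"]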
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
a_ = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
    def lowercase ( self : List[Any] , tokenizer : str ) -> Dict:
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__lowerCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 284 | 0 |
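For reference, the tokenizer class under test in a plain round trip. This assumes `transformers` plus the `fugashi`/`ipadic` extras that back the default MeCab word tokenizer; the checkpoint is the one the tests above already use:

from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
tokens = tokenizer.tokenize("こんにちは、世界。")   # MeCab word split, then WordPiece
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(tokenizer.decode(tokenizer.build_inputs_with_special_tokens(ids)))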
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = BertJapaneseTokenizer
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Optional[Any] = True
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
        input_text = """こんにちは、世界。 \nこんばんは、世界。"""
        output_text = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
    def SCREAMING_SNAKE_CASE ( self , tokenizer ) -> Dict:
        '''simple docstring'''
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase : Any = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase_ , """wb""" ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , """rb""" ) as handle:
UpperCAmelCase : Optional[Any] = pickle.load(lowerCAmelCase_ )
UpperCAmelCase : List[Any] = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
try:
UpperCAmelCase : Tuple = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
try:
UpperCAmelCase : Dict = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
try:
UpperCAmelCase : List[str] = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(lowerCAmelCase_ )
UpperCAmelCase : Any = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase_ , """wb""" ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , """rb""" ) as handle:
UpperCAmelCase : Optional[Any] = pickle.load(lowerCAmelCase_ )
UpperCAmelCase : List[str] = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Any = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(lowerCAmelCase_ )
UpperCAmelCase : Tuple = """こんにちは、世界。\nこんばんは、世界。"""
UpperCAmelCase : Optional[int] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase : Any = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(lowerCAmelCase_ , """wb""" ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , """rb""" ) as handle:
UpperCAmelCase : Dict = pickle.load(lowerCAmelCase_ )
UpperCAmelCase : int = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Dict = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
UpperCAmelCase : Any = tokenizer.subword_tokenizer
UpperCAmelCase : List[str] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
UpperCAmelCase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False )
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__lowerCAmelCase : Dict = BertJapaneseTokenizer
__lowerCAmelCase : Optional[int] = False
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = """こんにちは、世界。 \nこんばんは、世界。"""
UpperCAmelCase : List[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" )
UpperCAmelCase : int = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
lowerCAmelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
        text = tokenizer.encode("""ありがとう。""" , add_special_tokens=False )
        text_a = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict = """cl-tohoku/bert-base-japanese"""
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[Any] = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
UpperCAmelCase : Optional[Any] = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 109 |
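The WordpieceTokenizer assertions in both copies above pin down greedy longest-prefix matching with a '##' continuation marker, falling back to '[UNK]' for the whole word when no prefix matches. A self-contained sketch of that algorithm (not the library implementation):

def wordpiece_tokenize(word, vocab, unk='[UNK]'):
    # Greedily take the longest vocab entry matching at `start`; continuation
    # pieces are looked up with a '##' prefix. Whole word -> [UNK] on failure.
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece_tokenize('こんばんは', {'こん', '##ばんは', 'こんにちは'}))  # ['こん', '##ばんは']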
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """beit"""
    def __init__( self : List[Any] , vocab_size : Tuple=8_1_9_2 , hidden_size : Optional[int]=7_6_8 , num_hidden_layers : int=1_2 , num_attention_heads : Optional[int]=1_2 , intermediate_size : Any=3_0_7_2 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Any=0.0 , attention_probs_dropout_prob : Any=0.0 , initializer_range : Any=0.02 , layer_norm_eps : int=1e-12 , image_size : int=2_2_4 , patch_size : str=1_6 , num_channels : int=3 , use_mask_token : Dict=False , use_absolute_position_embeddings : int=False , use_relative_position_bias : List[Any]=False , use_shared_relative_position_bias : int=False , layer_scale_init_value : List[str]=0.1 , drop_path_rate : Union[str, Any]=0.1 , use_mean_pooling : List[str]=True , out_indices : List[Any]=[3, 5, 7, 1_1] , pool_scales : Optional[Any]=[1, 2, 3, 6] , use_auxiliary_head : Tuple=True , auxiliary_loss_weight : Dict=0.4 , auxiliary_channels : Tuple=2_5_6 , auxiliary_num_convs : Any=1 , auxiliary_concat_input : Any=False , semantic_loss_ignore_index : Optional[int]=2_5_5 , **kwargs : Any , ) -> Dict:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
def lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : Optional[Any] ) -> float:
return 1e-4
| 284 | 0 |
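Hedged usage note for the config row above (assumes `transformers` is installed): the defaults in the signature are what an argument-free constructor yields, and the accompanying OnnxConfig declares a single 4-D pixel_values input with a 1e-4 validation tolerance.

from transformers import BeitConfig

config = BeitConfig()  # vocab_size=8192, hidden_size=768, 12 layers/heads, 224px images, 16px patches
print(config.model_type)                     # 'beit'
print(config.image_size, config.patch_size)  # 224 16
print(config.out_indices)                    # [3, 5, 7, 11] - feature stages for segmentation heads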
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : str , parent : Optional[Any] , batch_size : List[Any]=7 , num_channels : Optional[Any]=3 , min_resolution : Dict=30 , max_resolution : Optional[int]=400 , do_resize : List[Any]=True , size : str=None , do_rescale : List[Any]=True , rescale_factor : Any=1 / 255 , do_normalize : Optional[Any]=True , image_mean : List[str]=[0.5, 0.5, 0.5] , image_std : Union[str, Any]=[0.5, 0.5, 0.5] , do_pad : Dict=True , ) -> Optional[int]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def UpperCAmelCase_ ( self : str , image_inputs : List[Any] , batched : Dict=False ) -> Optional[int]:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = DetrImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : str = DetrImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : Any ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'image_std' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_rescale' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'rescale_factor' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_pad' ) )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase_ )
UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self : int ) -> Any:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> str:
# prepare image and target
UpperCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
UpperCAmelCase : Tuple = json.loads(f.read() )
UpperCAmelCase : Optional[int] = {'image_id': 39_769, 'annotations': target}
# encode them
UpperCAmelCase : Any = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
UpperCAmelCase : int = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , return_tensors='pt' )
# verify pixel values
UpperCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase_ )
UpperCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
# verify area
UpperCAmelCase : Dict = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase_ ) )
# verify boxes
UpperCAmelCase : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase_ ) )
# verify is_crowd
UpperCAmelCase : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase_ ) )
# verify class_labels
UpperCAmelCase : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase_ ) )
# verify orig_size
UpperCAmelCase : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase_ ) )
# verify size
UpperCAmelCase : List[str] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase_ ) )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
# prepare image, target and masks_path
UpperCAmelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
UpperCAmelCase : str = json.loads(f.read() )
UpperCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
UpperCAmelCase : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
UpperCAmelCase : Union[str, Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
UpperCAmelCase : int = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , masks_path=lowerCAmelCase_ , return_tensors='pt' )
# verify pixel values
UpperCAmelCase : Dict = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase_ )
UpperCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
# verify area
UpperCAmelCase : Dict = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase_ ) )
# verify boxes
UpperCAmelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase_ )
UpperCAmelCase : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase_ , atol=1E-3 ) )
# verify image_id
UpperCAmelCase : Any = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase_ ) )
# verify is_crowd
UpperCAmelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase_ ) )
# verify class_labels
UpperCAmelCase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase_ ) )
# verify masks
UpperCAmelCase : Optional[Any] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCAmelCase_ )
# verify orig_size
UpperCAmelCase : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase_ ) )
# verify size
UpperCAmelCase : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase_ ) )
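# A minimal end-to-end sketch (outside the test class) of the COCO-detection
# preprocessing path that the @slow tests above verify; the fixture paths and
# the public checkpoint mirror the tests, everything else is illustrative.
if __name__ == "__main__":
    sample_image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
        sample_target = {'image_id': 39_769, 'annotations': json.loads(f.read() )}
    sample_processor = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
    sample_encoding = sample_processor(images=sample_image , annotations=sample_target , return_tensors='pt' )
    # pixel values come back resized and normalized; boxes are in normalized cxcywh format
    print(sample_encoding['pixel_values'].shape , sample_encoding['labels'][0]['boxes'].shape )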
| 151 |
def a_ ( sentence : str, ngram_size : int ):
    """Return all character n-grams of length ``ngram_size`` in ``sentence``.

    >>> a_('I am a sentence', 2)[:4]
    ['I ', ' a', 'am', 'm ']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 284 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
lowerCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
lowerCamelCase_ : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase_ : List[str] = CLIPTextModel(lowerCAmelCase_ )
lowerCamelCase_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def UpperCAmelCase__ (self , device , seed=0 ):
lowerCamelCase_ : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : str = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' )
if str(lowerCAmelCase_ ).startswith('''mps''' ):
lowerCamelCase_ : List[str] = torch.manual_seed(lowerCAmelCase_ )
else:
lowerCamelCase_ : Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
lowerCamelCase_ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Optional[Any] = self.get_dummy_components()
lowerCamelCase_ : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
lowerCamelCase_ : Tuple = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = sd_pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Optional[Any] = self.get_dummy_components()
lowerCamelCase_ : Any = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
lowerCamelCase_ : int = '''french fries'''
lowerCamelCase_ : str = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : Any = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Any = self.get_dummy_components()
lowerCamelCase_ : Tuple = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
lowerCamelCase_ : Tuple = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
lowerCamelCase_ : List[str] = [inputs['''prompt''']] * 2
lowerCamelCase_ : Optional[Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
lowerCamelCase_ : List[Any] = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
lowerCamelCase_ : int = image / 2 + 0.5
lowerCamelCase_ : Tuple = image.permute(0 , 3 , 1 , 2 )
lowerCamelCase_ : Optional[int] = image.repeat(2 , 1 , 1 , 1 )
lowerCamelCase_ : Union[str, Any] = sd_pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : str = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Optional[int] = self.get_dummy_components()
lowerCamelCase_ : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowerCamelCase_ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
lowerCamelCase_ : Optional[int] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
lowerCamelCase_ : List[Any] = sd_pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : str = image[0, -3:, -3:, -1]
        lowerCamelCase_ : Optional[Any] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_dummy_components()
lowerCamelCase_ : Any = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
lowerCamelCase_ : Optional[Any] = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
lowerCamelCase_ : Any = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : int = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='''pt''' ) )[0]
lowerCamelCase_ : Union[str, Any] = components['''vae''']
lowerCamelCase_ : Any = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ : List[Any] = pipe(**lowerCAmelCase_ )[0]
lowerCamelCase_ : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self , A=0 ):
lowerCamelCase_ : Any = torch.manual_seed(lowerCAmelCase_ )
lowerCamelCase_ : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowerCamelCase_ : str = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Union[str, Any] = self.get_inputs()
lowerCamelCase_ : int = pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Dict = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase_ )
lowerCamelCase_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase_ : str = self.get_inputs()
lowerCamelCase_ : List[Any] = pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Tuple = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase_ )
lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Any = self.get_inputs()
lowerCamelCase_ : List[str] = pipe(**lowerCAmelCase_ ).images
lowerCamelCase_ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : Dict = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = 0
        def callback_fn(step , timestep , latents ) -> None:
lowerCamelCase_ : Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ : str = latents[0, -3:, -3:, -1]
lowerCamelCase_ : List[str] = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCamelCase_ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase_ : str = latents[0, -3:, -3:, -1]
lowerCamelCase_ : int = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCamelCase_ : str = False
lowerCamelCase_ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
lowerCamelCase_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ (self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
lowerCamelCase_ : Any = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ : Dict = self.get_inputs()
lowerCamelCase_ : Any = pipe(**lowerCAmelCase_ )
lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = inputs['''image'''].resize((5_0_4, 5_0_4) )
lowerCamelCase_ : str = '''timbrooks/instruct-pix2pix'''
lowerCamelCase_ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = pipe(**lowerCAmelCase_ )
lowerCamelCase_ : Union[str, Any] = output.images[0]
lowerCamelCase_ : Optional[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
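# Minimal end-to-end sketch of the pipeline exercised by the slow tests above;
# the checkpoint and image URL are the same public ones those tests load,
# while the prompt, dtype and step count are illustrative.
if __name__ == "__main__":
    example_pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.floataa )
    example_pipe.to(torch_device )
    example_image = load_image(
        '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
    example_output = example_pipe(
        '''turn him into a cyborg''' , image=example_image , num_inference_steps=10 ).images[0]
    example_output.save('''pix2pix_example.png''' )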
| 318 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """pegasus"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : List[str] , vocab_size : Union[str, Any]=5_0_2_6_5 , max_position_embeddings : Union[str, Any]=1_0_2_4 , encoder_layers : Union[str, Any]=1_2 , encoder_ffn_dim : Dict=4_0_9_6 , encoder_attention_heads : str=1_6 , decoder_layers : List[Any]=1_2 , decoder_ffn_dim : Union[str, Any]=4_0_9_6 , decoder_attention_heads : Union[str, Any]=1_6 , encoder_layerdrop : Any=0.0 , decoder_layerdrop : Optional[int]=0.0 , use_cache : List[Any]=True , is_encoder_decoder : Any=True , activation_function : Dict="gelu" , d_model : Optional[int]=1_0_2_4 , dropout : List[str]=0.1 , attention_dropout : str=0.0 , activation_dropout : Union[str, Any]=0.0 , init_std : Optional[int]=0.02 , decoder_start_token_id : Tuple=0 , scale_embedding : Tuple=False , pad_token_id : Union[str, Any]=0 , eos_token_id : int=1 , forced_eos_token_id : Tuple=1 , **lowerCAmelCase_ : Tuple , ) -> List[str]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
@property
def lowercase ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def lowercase ( self : Optional[Any] ) -> int:
return self.d_model
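# Illustrative sanity check (assuming the standard `transformers.PegasusConfig`
# that this file mirrors): the `attribute_map` above aliases the generic names
# onto the encoder fields.
# from transformers import PegasusConfig
# config = PegasusConfig()
# assert config.num_attention_heads == config.encoder_attention_heads == 16
# assert config.hidden_size == config.d_model == 1024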
| 284 | 0 |
import torch
from torch import nn
class UpperCamelCase__ ( nn.Module ):
    def __init__(self : int , n_token : str , d_embed : Dict , d_proj : Dict , cutoffs : List[str] , div_val : Optional[Any]=1 , keep_order : Any=False ):
super().__init__()
__a : Optional[Any] = n_token
__a : Dict = d_embed
__a : int = d_proj
__a : List[str] = cutoffs + [n_token]
__a : Any = [0] + self.cutoffs
__a : Optional[int] = div_val
__a : str = self.cutoffs[0]
__a : List[Any] = len(self.cutoffs ) - 1
__a : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__a : Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__a : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
__a : Any = nn.ModuleList()
__a : Any = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
__a , __a : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a : Any = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
__a : Dict = keep_order
    def _compute_logit(self : Tuple , hidden : Optional[Any] , weight : str , bias : Optional[Any] , proj : List[Any] ):
if proj is None:
__a : Union[str, Any] = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__a : Tuple = nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
__a : List[Any] = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
    def forward(self : Optional[Any] , hidden : Optional[Any] , labels : int=None , keep_order : Any=False ):
if labels is not None:
# Shift so that tokens < n predict n
__a : int = hidden[..., :-1, :].contiguous()
__a : Optional[int] = labels[..., 1:].contiguous()
__a : str = hidden.view(-1 , hidden.size(-1 ) )
__a : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__a : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__a : Dict = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__a : Optional[Any] = labels != -1_0_0
__a : int = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
__a : Dict = (
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__a : List[str] = nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
__a , __a : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__a , __a : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a : List[str] = self.out_layers[0].weight[l_idx:r_idx]
__a : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
__a : Dict = self.out_layers[i].weight
__a : Any = self.out_layers[i].bias
if i == 0:
__a : Any = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__a : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
__a , __a , __a : Optional[Any] = weights[0], biases[0], self.out_projs[0]
__a : str = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__a : Optional[Any] = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
__a : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__a : List[Any] = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
__a : Any = 0
__a : Optional[int] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
__a , __a : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__a : Optional[int] = (labels >= l_idx) & (labels < r_idx)
__a : Optional[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__a : Union[str, Any] = labels.index_select(0 , lowerCAmelCase_ ) - l_idx
__a : Union[str, Any] = head_logprob.index_select(0 , lowerCAmelCase_ )
__a : Optional[int] = hidden.index_select(0 , lowerCAmelCase_ )
else:
__a : List[str] = hidden
if i == 0:
if labels is not None:
__a : Dict = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__a : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
__a , __a , __a : Union[str, Any] = weights[i], biases[i], self.out_projs[i]
__a : Dict = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__a : Any = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
__a : int = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__a : List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__a : Optional[int] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__a : Tuple = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self : Optional[int] , hidden : List[str] ):
if self.n_clusters == 0:
__a : List[Any] = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
__a , __a : Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__a , __a : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
__a : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
__a : Dict = self.out_layers[i].weight
__a : Optional[int] = self.out_layers[i].bias
if i == 0:
__a : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__a : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
__a , __a , __a : str = weights[0], biases[0], self.out_projs[0]
__a : Dict = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__a : str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__a : Any = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
__a : Tuple = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
__a , __a : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__a : Dict = head_logprob[:, : self.cutoffs[0]]
else:
__a , __a , __a : List[str] = weights[i], biases[i], self.out_projs[i]
__a : Optional[Any] = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__a : List[Any] = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
__a : Optional[Any] = head_logprob[:, -i] + tail_logprob_i
__a : Optional[int] = logprob_i
return out
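# Tiny illustrative forward pass through the adaptive softmax above; the
# vocabulary size, cutoffs and shapes are arbitrary sketch values. With
# labels, the module returns per-token negative log-likelihoods shifted by
# one position (so 2 * 4 = 8 values for a (2, 5) batch).
# proj = UpperCamelCase__(n_token=1_0_0 , d_embed=1_6 , d_proj=1_6 , cutoffs=[2_0 , 6_0] )
# hidden = torch.randn(2 , 5 , 1_6 )
# labels = torch.randint(0 , 1_0_0 , (2 , 5) )
# nll = proj(hidden , labels )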
| 216 |
def lucas_lehmer_test ( p : int ):
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
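# Illustrative cross-check against the known small Mersenne prime exponents:
if __name__ == "__main__":
    print([p for p in range(2 , 32 ) if lucas_lehmer_test(p )] )  # [2, 3, 5, 7, 13, 17, 19, 31]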
| 284 | 0 |
"""simple docstring"""
def method_1 ( boundary , steps ):
    # composite ("extended") trapezoidal rule:
    # int(f) ~= h/2 * (f(x_0) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(x_n))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points ( a , b , h ):
    x = a + h
    # a half-step tolerance keeps floating-point drift from dropping the last
    # interior point
    while x < (b - h / 2.0):
        yield x
        x = x + h
def f ( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main ():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary , steps )
    print(f'''y = {y}''' )
if __name__ == "__main__":
    main()
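# Worked check for main() above: with f(x) = x**2 on [0, 1] and 10 steps,
# h = 0.1 and the interior squares sum to 0.01 + 0.04 + ... + 0.81 = 2.85, so
# y = 0.05 * (0 + 1) + 0.1 * 2.85 = 0.335, close to the exact integral 1/3.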
| 98 |
from __future__ import annotations
import math
def minimax ( depth : int, node_index : int, is_max : bool, scores : list[int], height : float ):
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
def main ():
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ), 2 )
    print('Optimal value : ', end='' )
    print(minimax(0, 0, True, scores, height ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
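# Illustrative trace of main() above: the depth-2 max nodes reduce the leaf
# pairs to (90, 23) -> 90, (6, 33) -> 33, (21, 65) -> 65, (123, 34423) -> 34423;
# the depth-1 min nodes yield 33 and 65, and the root max returns 65.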
| 284 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase__ = 'focalnet'
    def __init__( self : List[Any] , image_size : Optional[int]=224 , patch_size : int=4 , num_channels : Optional[int]=3 , embed_dim : Tuple=96 , use_conv_embed : Tuple=False , hidden_sizes : List[str]=[192, 384, 768, 768] , depths : Any=[2, 2, 6, 2] , focal_levels : Dict=[2, 2, 2, 2] , focal_windows : Union[str, Any]=[3, 3, 3, 3] , hidden_act : Dict="gelu" , mlp_ratio : List[Any]=4.0 , hidden_dropout_prob : Any=0.0 , drop_path_rate : Any=0.1 , use_layerscale : Any=False , layerscale_value : Dict=1e-4 , use_post_layernorm : Tuple=False , use_post_layernorm_in_modulation : Union[str, Any]=False , normalize_modulator : Optional[int]=False , initializer_range : Optional[int]=0.0_2 , layer_norm_eps : Union[str, Any]=1e-5 , encoder_stride : Union[str, Any]=32 , out_features : Union[str, Any]=None , out_indices : Dict=None , **_A : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : Any = embed_dim
UpperCAmelCase__ : Dict = use_conv_embed
UpperCAmelCase__ : Optional[int] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = depths
UpperCAmelCase__ : Optional[int] = focal_levels
UpperCAmelCase__ : str = focal_windows
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Dict = mlp_ratio
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Dict = drop_path_rate
UpperCAmelCase__ : Any = use_layerscale
UpperCAmelCase__ : List[str] = layerscale_value
UpperCAmelCase__ : int = use_post_layernorm
UpperCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation
UpperCAmelCase__ : int = normalize_modulator
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : Dict = encoder_stride
UpperCAmelCase__ : Union[str, Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
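# Quick sanity sketch (assuming the standard `transformers.FocalNetConfig`
# class that this file mirrors): the defaults above describe four stages on
# top of the stem.
# from transformers import FocalNetConfig
# config = FocalNetConfig()
# config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']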
| 181 |
def a_ ( number : int ):
    """Return True if ``number`` has at most one bit set (0 counts as True).

    >>> [a_(n) for n in (0, 6, 8)]
    [True, False, True]
    """
    if number < 0:
        raise ValueError('number must not be negative' )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
from manim import *
class _a ( _UpperCamelCase ):
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = Rectangle(height=0.5 , width=0.5 )
lowercase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = Text('''CPU''' , font_size=24 )
lowercase__ = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase_ )
lowercase__ = [mem.copy() for i in range(4 )]
lowercase__ = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = Text('''GPU''' , font_size=24 )
lowercase__ = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase_ )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = Text('''Model''' , font_size=24 )
lowercase__ = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase_ )
lowercase__ = []
for i, rect in enumerate(lowerCAmelCase_ ):
rect.set_stroke(lowerCAmelCase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowercase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase_ , buff=0.0 )
self.add(lowerCAmelCase_ )
cpu_targs.append(lowerCAmelCase_ )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
lowercase__ = Text('''Loaded Checkpoint''' , font_size=24 )
lowercase__ = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , aligned_edge=lowerCAmelCase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowercase__ = MarkupText(
f'Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase_ ) , Write(lowerCAmelCase_ ) )
self.play(Write(lowerCAmelCase_ , run_time=1 ) , Create(lowerCAmelCase_ , run_time=1 ) )
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(lowerCAmelCase_ ):
lowercase__ = fill.copy().set_fill(lowerCAmelCase_ , opacity=0.7 )
target.move_to(lowerCAmelCase_ )
first_animations.append(GrowFromCenter(lowerCAmelCase_ , run_time=1 ) )
lowercase__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase_ , run_time=1.5 ) )
self.play(*lowerCAmelCase_ )
self.play(*lowerCAmelCase_ )
self.wait()
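# To render the scene above with the manim CLI (illustrative; assumes manim
# is installed and this file is saved as, e.g., stage_anim.py):
#   manim -pql stage_anim.py _a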
| 110 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
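# Typical downstream use of the classes re-exported above: swap a loaded
# pipeline's scheduler for another implementation built from the same config.
# from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
# pipe = DiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)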
| 284 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = 'pt'
elif is_tf_available():
UpperCamelCase = 'tf'
else:
UpperCamelCase = 'jax'
class __UpperCAmelCase (_UpperCamelCase ,unittest.TestCase ):
__snake_case : Optional[int] = ByTaTokenizer
__snake_case : Tuple = False
def UpperCamelCase ( self: Any ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self: str ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def UpperCamelCase ( self: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
    def UpperCamelCase ( self: Optional[int] , tokenizer: int , with_prefix_space: Optional[Any]=False , max_length: Dict=20 , min_length: List[str]=5 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for i in range(len(lowerCAmelCase_ ) ):
try:
_SCREAMING_SNAKE_CASE = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_SCREAMING_SNAKE_CASE = list(filter(lambda UpperCAmelCase_ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowerCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = list(filter(lambda UpperCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase_ ) , lowerCAmelCase_ ) )
if max_length is not None and len(lowerCAmelCase_ ) > max_length:
_SCREAMING_SNAKE_CASE = toks[:max_length]
if min_length is not None and len(lowerCAmelCase_ ) < min_length and len(lowerCAmelCase_ ) > 0:
while len(lowerCAmelCase_ ) < min_length:
_SCREAMING_SNAKE_CASE = toks + toks
# toks_str = [t[1] for t in toks]
_SCREAMING_SNAKE_CASE = [t[0] for t in toks]
# Ensure consistency
_SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
if " " not in output_txt and len(lowerCAmelCase_ ) > 1:
_SCREAMING_SNAKE_CASE = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase_ )
)
if with_prefix_space:
_SCREAMING_SNAKE_CASE = """ """ + output_txt
_SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
return output_txt, output_ids
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_SCREAMING_SNAKE_CASE = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = """Unicode €."""
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase_ )
# decoding
_SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """Unicode €.</s>""" )
_SCREAMING_SNAKE_CASE = tokenizer("""e è é ê ë""" )
_SCREAMING_SNAKE_CASE = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCAmelCase_ )
# decoding
_SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_SCREAMING_SNAKE_CASE = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
if FRAMEWORK != "jax":
_SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] )
else:
_SCREAMING_SNAKE_CASE = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowerCAmelCase_ )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertNotIn("""decoder_input_ids""" , lowerCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = [
"""Summary of the text.""",
"""Another summary.""",
]
_SCREAMING_SNAKE_CASE = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.ta_base_tokenizer
_SCREAMING_SNAKE_CASE = ["""A long paragraph for summarization. </s>"""]
_SCREAMING_SNAKE_CASE = ["""Summary of the text. </s>"""]
# fmt: off
_SCREAMING_SNAKE_CASE = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_SCREAMING_SNAKE_CASE = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , text_target=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch["""input_ids"""][0] )
self.assertEqual(lowerCAmelCase_ , batch["""labels"""][0] )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = [F'<extra_id_{i}>' for i in range(125 )]
_SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
lowerCAmelCase_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCAmelCase_ )]
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(lowerCAmelCase_ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def UpperCamelCase ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_SCREAMING_SNAKE_CASE = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_SCREAMING_SNAKE_CASE = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [] )
setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 306 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file ( tokenizer_name : List[str], data_dir : Dict, max_source_length : Tuple=1024, max_target_length : Optional[Any]=1024, consider_target : Tuple=False, **kwargs : Union[str, Any] ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='train', **lowerCAmelCase_ )
__lowerCAmelCase = tok.pad_token_id
def get_lens(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = tqdm(
DataLoader(lowerCAmelCase_, batch_size=512, num_workers=8, shuffle=lowerCAmelCase_, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
__lowerCAmelCase = []
for batch in dl:
__lowerCAmelCase = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
__lowerCAmelCase = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_, lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_, lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='val', **lowerCAmelCase_ )
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_, train_ds.len_file )
pickle_save(lowerCAmelCase_, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
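# Example invocation (illustrative tokenizer/paths, not from this repo), via python-fire's CLI:
#   python save_len_file.py t5-small /path/to/wmt_en_ro --max_source_length 1024
# This caches pickled per-example max lengths next to the train/val datasets for later bucketing.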
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
"""simple docstring"""
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = embed_dim
__lowerCAmelCase = word_embed_proj_dim
__lowerCAmelCase = False
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
    def setUp( self ):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def lowercase ( self : Union[str, Any] ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1 , p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1 , p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
        model = TFOPTModel.from_pretrained('facebook/opt-350m' )
        input_ids = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        super().setUp()
        self.path_model = 'facebook/opt-350m'
    def test_logits( self ):
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model )
        prompts = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors='tf' , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def prompts( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm( self ):
        model_id = 'facebook/opt-125m'
        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors='tf' ).input_ids
            generated_ids = model.generate(input_ids , max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation( self ):
        model_id = 'facebook/opt-350m'
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self ):
        model_id = 'facebook/opt-350m'
        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors='tf' ).input_ids
            generated_ids = model.generate(input_ids , max_length=1_0 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
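# XNOR outputs 1 exactly when both inputs agree: (0, 0) -> 1, (0, 1) -> 0, (1, 0) -> 0, (1, 1) -> 1.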
def xnor_gate( input_1: int, input_2: int ):
    return 1 if input_1 == input_2 else 0
def test_xnor_gate( ):
assert xnor_gate(0, 0 ) == 1
assert xnor_gate(0, 1 ) == 0
assert xnor_gate(1, 0 ) == 0
assert xnor_gate(1, 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A: Optional[int] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
    return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict( dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
    words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A: Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
A: Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
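# Brute-force attack: XOR the ciphertext against every 3-letter lowercase key, keep only
# decryptions made entirely of printable ASCII, then narrow the candidates down by checking
# for common English words.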
def try_key( ciphertext: list[int], key: tuple[int, ...] ):
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ), ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext: list[int] ):
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3 ):
        encoded = try_key(ciphertext, key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles: list[str], common_word: str ):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( data_file: str = "p059_cipher.txt" ):
    data = Path(__file__ ).parent.joinpath(data_file ).read_text(encoding='utf-8' )
    ciphertext = [int(number ) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
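# A Gray code over n bits enumerates all 2**n values such that consecutive entries differ in exactly one bit.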
def gray_code( bit_count: int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string( bit_count: int ) -> list:
    # The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path: str ) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
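# Illustrative example: extract_path_from_uri("s3://my-bucket/data/train") -> "my-bucket/data/train"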
def is_remote_filesystem( fs: fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename( fs: fsspec.AbstractFileSystem, src: str, dst: str ):
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ), fs._strip_protocol(dst ) )
    else:
        fs.mv(src, dst, recursive=True )
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
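# Clearing the cached loop/thread references matters in forked worker processes, where a lock or
# event loop inherited from the parent can hang fsspec's async filesystem calls.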
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
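# Only parameters with requires_grad=True are counted, so frozen layers are excluded from the total.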
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir, metric ):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def get_early_stopping_callback( metric, patience ):
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class _UpperCAmelCase ( pl.Callback ):
"""simple docstring"""
    def on_train_batch_end( self , trainer , pl_module ):
        lrs = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs(
        self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ) -> None:
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
@rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
import math
from datetime import datetime, timedelta
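# Gauss's Easter algorithm (computus): derives the date of Easter Sunday from the year's position
# in the 19-year Metonic lunar cycle, corrected for Gregorian leap-day rules.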
def gauss_easter( year: int ) -> datetime:
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0 )
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
lowercase__ ='will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence( x: str ) -> str:
    re.sub('<n>', '', x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
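# Illustrative usage: add_newline_to_end_of_each_sentence("Hello there. General Kenobi.")
# returns "Hello there.\nGeneral Kenobi." (one sentence per line, for ROUGE-style scoring).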
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class snake_case ( _UpperCamelCase ):
"""simple docstring"""
snake_case__ = "camembert"
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class snake_case ( _UpperCamelCase ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '''''' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '''''' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
UpperCamelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 284 | 0 |
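# Usage sketch matching the metric's own docstring above (requires the
# math_equivalence module from the hendrycks/math repository):
import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0}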
import os
from collections.abc import Iterator
def _a ( SCREAMING_SNAKE_CASE = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(lowerCAmelCase_ ):
lowercase__ = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowerCAmelCase_ )[1] in (".py", ".ipynb"):
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ).lstrip('''./''' )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return f'{i * " "}*' if i else "\n##"
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowerCAmelCase_ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(lowerCAmelCase_ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _a ( SCREAMING_SNAKE_CASE = "." ):
"""simple docstring"""
lowercase__ = ''''''
for filepath in sorted(good_file_paths(lowerCAmelCase_ ) ):
lowercase__ , lowercase__ = os.path.split(lowerCAmelCase_ )
if filepath != old_path:
lowercase__ = print_path(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase__ = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
lowercase__ = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'{md_prefix(lowerCAmelCase_ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 110 |
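# Quick sanity checks for the helpers above; the directory walk depends on the
# tree it runs in, but the prefix and heading logic is deterministic.
assert md_prefix(0) == "\n##"  # top-level entries become H2 headings
assert md_prefix(2) == "    *"  # nested entries become indented bullets
print_path("", "data_structures/heap")  # prints "## Data Structures" then "  * Heap"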
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(lowerCAmelCase_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' )
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = 'french fries'
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = [inputs['prompt']] * 2
__lowerCAmelCase = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
__lowerCAmelCase = image / 2 + 0.5
__lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
__lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = [round(lowerCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCAmelCase_ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0]
__lowerCAmelCase = components['vae']
__lowerCAmelCase = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )[0]
__lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any]=0 ) -> Any:
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__lowerCAmelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : List[Any] ) -> str:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Dict:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = 0
def callback_fn(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor ) -> None:
__lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__lowerCAmelCase = False
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Optional[int] ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase ( self : List[Any] ) -> Any:
__lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCAmelCase = inputs['image'].resize((5_0_4, 5_0_4) )
__lowerCAmelCase = 'timbrooks/instruct-pix2pix'
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__lowerCAmelCase = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 284 | 0 |
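# Minimal end-to-end sketch of the pipeline exercised by the tests above; the
# prompt and image URL mirror the slow-test inputs, and a CUDA GPU is assumed.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0).images[0]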
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 306 |
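# Worked example for create_ngram above: the window slides one position at a
# time, so a string of length L yields L - n + 1 grams.
print(create_ngram("I am", 2))  # ['I ', ' a', 'am']
print(create_ngram("abc", 3))  # ['abc']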
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        # clearing the lowest set bit each pass: one iteration per set bit
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(F"""Benchmark when {number = }:""")
        print(F"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""", setup=setup)
        print(F"""timeit() runs in {timing} seconds""")
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=setup,
        )
        print(F"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 284 | 0 |
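# Worked example for both counters above: 25 = 0b11001 has three set bits.
# Kernighan's loop runs once per set bit; the modulo loop once per bit of width.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3
assert get_set_bits_count_using_modulo_operator(0) == 0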
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 130 |
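# Each entry in the package table above is a lazily importable subpackage; a
# quick sketch with one of the listed names:
from transformers.models import bert

print(bert.BertConfig().hidden_size)  # 768 by default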
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case : Dict = pytest.mark.integration
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCAmelCase_ ) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase ( self : List[str] ) -> Tuple:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
__lowerCAmelCase = dset.map(
lambda lowerCAmelCase_ , lowerCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
__lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase ( self : Optional[Any] ) -> str:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : int ) -> Optional[Any]:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : str ) -> int:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search_batch , queries[0] )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> List[str]:
import faiss
__lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase ( self : Union[str, Any] ) -> Dict:
import faiss
__lowerCAmelCase = faiss.IndexFlat(5 )
__lowerCAmelCase = FaissIndex(custom_index=lowerCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase ( self : str ) -> Any:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
__lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__lowerCAmelCase = 'index.faiss'
__lowerCAmelCase = F"""mock://{index_name}"""
index.save(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = FaissIndex.load(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = np.zeros(5, dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = Elasticsearch()
__lowerCAmelCase = {'acknowledged': True}
__lowerCAmelCase = ElasticSearchIndex(es_client=lowerCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__lowerCAmelCase = ['foo', 'bar', 'foobar']
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
# batched queries with timeout
__lowerCAmelCase = ['foo', 'bar', 'foobar']
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ , request_timeout=3_0 )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
| 284 | 0 |
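# Minimal sketch of the FAISS flow the tests above exercise (toy data,
# float32 vectors; requires the faiss package; names are illustrative):
import faiss
import numpy as np
from datasets import Dataset

dset = Dataset.from_dict({"filename": [f"doc_{i}" for i in range(30)]})
dset = dset.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
dset.add_faiss_index(column="vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
print(examples["filename"][0])  # doc_29 — the row with the largest inner product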
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__snake_case : Tuple =True
except (ImportError, AttributeError):
__snake_case : Any =object
def lowerCAmelCase__ ( *lowerCamelCase_ : Optional[Any] ,**lowerCamelCase_ : int):
'''simple docstring'''
pass
__snake_case : Any =False
__snake_case : Optional[int] =logging.get_logger('transformers-cli/serving')
def lowerCAmelCase__ ( lowerCamelCase_ : Namespace):
'''simple docstring'''
lowerCAmelCase__ : Dict = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
return ServeCommand(lowerCAmelCase_ ,args.host ,args.port ,args.workers)
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
snake_case_ =42
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
snake_case_ =42
snake_case_ =42
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
snake_case_ =42
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
snake_case_ =42
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
@staticmethod
def lowerCAmelCase__ (__lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : int = parser.add_parser(
'''serve''' ,help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' ,type=lowerCAmelCase_ ,choices=get_supported_tasks() ,help='''The task to run the pipeline on''' ,)
serve_parser.add_argument('''--host''' ,type=lowerCAmelCase_ ,default='''localhost''' ,help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' ,type=lowerCAmelCase_ ,default=88_88 ,help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' ,type=lowerCAmelCase_ ,default=1 ,help='''Number of http workers''' )
serve_parser.add_argument('''--model''' ,type=lowerCAmelCase_ ,help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' ,type=lowerCAmelCase_ ,help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' ,type=lowerCAmelCase_ ,help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' ,type=lowerCAmelCase_ ,default=-1 ,help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' ,)
serve_parser.set_defaults(func=lowerCAmelCase_ )
def __init__(self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = pipeline
lowerCAmelCase__ : Tuple = host
lowerCAmelCase__ : Tuple = port
lowerCAmelCase__ : Optional[Any] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
lowerCAmelCase__ : str = FastAPI(
routes=[
APIRoute(
'''/''' ,self.model_info ,response_model=lowerCAmelCase_ ,response_class=lowerCAmelCase_ ,methods=['''GET'''] ,),
APIRoute(
'''/tokenize''' ,self.tokenize ,response_model=lowerCAmelCase_ ,response_class=lowerCAmelCase_ ,methods=['''POST'''] ,),
APIRoute(
'''/detokenize''' ,self.detokenize ,response_model=lowerCAmelCase_ ,response_class=lowerCAmelCase_ ,methods=['''POST'''] ,),
APIRoute(
'''/forward''' ,self.forward ,response_model=lowerCAmelCase_ ,response_class=lowerCAmelCase_ ,methods=['''POST'''] ,),
] ,timeout=6_00 ,)
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
run(self._app ,host=self.host ,port=self.port ,workers=self.workers )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowerCAmelCase__ (self ,__lowerCamelCase = Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ,__lowerCamelCase = Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ) -> Any:
"""simple docstring"""
try:
lowerCAmelCase__ : Dict = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ )
if return_ids:
lowerCAmelCase__ : List[Any] = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
return ServeTokenizeResult(tokens=lowerCAmelCase_ ,tokens_ids=lowerCAmelCase_ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 ,detail={'''model''': '''''', '''error''': str(lowerCAmelCase_ )} )
def lowerCAmelCase__ (self ,__lowerCamelCase = Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ,__lowerCamelCase = Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ,__lowerCamelCase = Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ,) -> Optional[Any]:
"""simple docstring"""
try:
lowerCAmelCase__ : int = self._pipeline.tokenizer.decode(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
return ServeDeTokenizeResult(model='''''' ,text=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_00 ,detail={'''model''': '''''', '''error''': str(lowerCAmelCase_ )} )
async def lowerCAmelCase__ (self ,__lowerCamelCase=Body(lowerCAmelCase_ ,embed=lowerCAmelCase_ ) ) -> Optional[int]:
"""simple docstring"""
if len(lowerCAmelCase_ ) == 0:
return ServeForwardResult(output=[] ,attention=[] )
try:
# Forward through the model
lowerCAmelCase__ : Optional[int] = self._pipeline(lowerCAmelCase_ )
return ServeForwardResult(output=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(5_00 ,{'''error''': str(lowerCAmelCase_ )} )
| 129 |
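# Once the server above is running (e.g. `transformers-cli serve --task
# sentiment-analysis --port 8888`; task and port values are illustrative), the
# endpoints can be exercised over HTTP — a hedged sketch using requests,
# assuming the default embedded body key "inputs" for /forward:
import requests

print(requests.get("http://localhost:8888/").json())  # model config via model_info
print(requests.post("http://localhost:8888/forward", json={"inputs": "hello"}).json())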
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 284 | 0 |
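# Worked example for the distribution rule tested above: 10 shards over at
# most 3 jobs split as evenly as possible, with the larger ranges first.
from datasets.utils.sharding import _distribute_shards

print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]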
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
 | 145 |
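# Sketch: building dummy ONNX-export inputs with the config above (checkpoint
# name is the one from the archive map; PyTorch required).
from transformers import AutoConfig, AutoTokenizer, TensorType

gptj_config = AutoConfig.from_pretrained("EleutherAI/gpt-j-6B")
onnx_config = GPTJOnnxConfig(gptj_config, task="default")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
print(list(dummy.keys()))  # ['input_ids', 'attention_mask']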
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
a_ = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
__lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase ( self : List[Any] , lowerCAmelCase_ : str ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
    def test_character_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こ', 'ん', 'に', 'ち', 'は'])
        self.assertListEqual(tokenizer.tokenize('こんにちほ'), ['こ', 'ん', 'に', 'ち', '[UNK]'])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """Checks that AutoTokenizer resolves this checkpoint to BertJapaneseTokenizer."""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Checks the warning emitted when a checkpoint is loaded with a mismatched tokenizer class."""

    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'))
| 284 | 0 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
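    # Added usage sketch (not from the original module): the coordinates are
    # assumed sample values for San Francisco and Yosemite; the result is the
    # ellipsoidal distance in metres (roughly 2.5e5 for this pair).
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))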
| 109 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
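# Usage sketch (added for illustration; run from an environment where
# `transformers` is installed, since this module uses relative imports):
#     from transformers import BeitConfig
#     config = BeitConfig()      # the defaults defined above
#     config.hidden_size         # 768
#     config.out_indices         # [3, 5, 7, 11]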
| 284 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase__ = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
lowercase__ = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
lowercase__ = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features({
                'predictions': datasets.Value('string', id='sequence'),
                'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
            }),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False,
                 support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 151 |
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all n-grams of size `ngram_size` from the given string."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
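    # Added example: character trigrams of a short string.
    print(create_ngram("I am a sentence", 3))  # ['I a', ' am', 'am ', 'm a', ...]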
| 284 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config based on the ResNet backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights to our DETR structure."""
    # load default config
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
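    # Example invocation (added for illustration; assumes this script is saved
    # as convert_detr_to_pytorch.py — only the flags defined above are used):
    #   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
    #       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub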
| 318 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
                 encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
                 activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0,
                 activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False,
                 pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
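# Usage sketch (added for illustration):
#     from transformers import PegasusConfig
#     small = PegasusConfig(d_model=256, encoder_layers=2, decoder_layers=2)
#     small.hidden_size            # 256, resolved through attribute_map -> d_model
#     small.num_attention_heads    # 16, the encoder_attention_heads default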
| 284 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the top checkpoints ranked by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 216 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (requires p >= 2)."""
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 284 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
    'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6",
                 tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001,
                 **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
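# Usage sketch (added for illustration; the names are the classes defined above):
#     config = MobileNetV1Config(depth_multiplier=0.75)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     list(onnx_config.inputs)            # ['pixel_values']
#     onnx_config.atol_for_validation     # 1e-4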
| 98 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 284 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 181 |
def is_power_of_two(number: int) -> bool:
    """Check whether `number` is a power of two using the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
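    # Added examples: powers of two have at most one set bit. Note that this
    # bit trick also reports 0 as a power of two.
    for n in (0, 1, 2, 3, 16, 17):
        print(n, is_power_of_two(n))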
| 284 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features({
                'predictions': datasets.Value('string', id='sequence'),
                'references': datasets.Value('string', id='sequence'),
            }),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
                 num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5,
                 kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024,
                 divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 110 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
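# Usage sketch (added for illustration; assumes the parent `diffusers` package
# is installed, since this __init__ uses relative imports):
#     from diffusers import DDPMScheduler
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)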
| 284 | 0 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via its exponential form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
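    # Added example: tanh maps the real line into (-1, 1).
    print(tangent_hyperbolic(np.array([-2.0, 0.0, 2.0])))  # ~[-0.964  0.     0.964]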
| 306 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Save the token length of each example so batches can be grouped by size."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
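    # Example invocation (added; the data layout under ./data is an assumption —
    # Seq2SeqDataset expects {train,val}.source and {train,val}.target files):
    #   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./data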
| 284 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]="[CLS]" , SCREAMING_SNAKE_CASE : Tuple="[SEP]" , SCREAMING_SNAKE_CASE : Optional[int]="[UNK]" , SCREAMING_SNAKE_CASE : List[Any]="[SEP]" , SCREAMING_SNAKE_CASE : Any="[PAD]" , SCREAMING_SNAKE_CASE : Union[str, Any]="[CLS]" , SCREAMING_SNAKE_CASE : Dict="[MASK]" , **SCREAMING_SNAKE_CASE : Optional[int] , ):
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase__ : List[str] = do_lower_case
lowercase__ : int = remove_space
lowercase__ : Optional[Any] = keep_accents
lowercase__ : str = vocab_file
lowercase__ : str = spm.SentencePieceProcessor()
self.sp_model.Load(lowerCAmelCase_ )
@property
def snake_case ( self : Dict ):
return len(self.sp_model )
def snake_case ( self : Dict ):
lowercase__ : int = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ):
lowercase__ : Union[str, Any] = self.__dict__.copy()
lowercase__ : Dict = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = d
lowercase__ : Dict = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int=False ):
lowercase__ : Dict = self.sp_model.EncodeAsPieces(lowerCAmelCase_ )
return pieces
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
return self.sp_model.PieceToId(lowerCAmelCase_ )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Tuple = self.sp_model.decode_pieces(lowerCAmelCase_ )
return out_string
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
lowercase__ : List[str] = [self.sep_token_id]
lowercase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCAmelCase_ ) )
return
lowercase__ : int = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
return (out_vocab_file,)
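

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the tokenizer above). It exercises
# the same SentencePiece calls the class wraps -- Load, EncodeAsPieces,
# PieceToId and decode_pieces -- against a local ``sentencepiece.model``
# file. The file path and the sample text are assumptions of this sketch.
# ---------------------------------------------------------------------------
def _sentencepiece_roundtrip_demo(model_path: str = "sentencepiece.model") -> None:
    if not os.path.exists(model_path):
        return  # nothing to demo without a trained SentencePiece model file
    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)  # mirrors the Load call in __init__ above
    pieces = sp.EncodeAsPieces("Hello world")  # text -> subword pieces
    ids = [sp.PieceToId(piece) for piece in pieces]  # pieces -> vocabulary ids
    print(pieces, ids, sp.decode_pieces(pieces))  # decoding typically round-trips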
| 130 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : List[Any]=None ):
if attention_mask is None:
__lowerCAmelCase = tf.cast(tf.math.not_equal(lowerCAmelCase_, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCAmelCase :
"""simple docstring"""
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = embed_dim
__lowerCAmelCase = word_embed_proj_dim
__lowerCAmelCase = False
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
__lowerCAmelCase = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
__lowerCAmelCase = TFOPTModel(config=lowerCAmelCase_ )
__lowerCAmelCase = inputs_dict['input_ids']
__lowerCAmelCase = input_ids[:1, :]
__lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
__lowerCAmelCase = 1
# first forward pass
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and the attention mask
__lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFOPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ):
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
                    # Build the word embedding weights if they do not exist yet,
                    # then retry fetching the attribute once the model is built.
model.build()
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
                # resize the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
__lowerCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
__lowerCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return tf.constant(lowerCAmelCase_, dtype=tf.intaa )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
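

# ---------------------------------------------------------------------------
# Standalone sketch of the attention-mask recipe in prepare_opt_inputs_dict
# above: positions holding the pad token id get mask 0, everything else 1.
# pad_token_id=1 matches the OPT test config used here; the ids are arbitrary.
# ---------------------------------------------------------------------------
if is_tf_available():

    def _attention_mask_demo() -> None:
        ids = tf.constant([[5, 7, 1, 1]])  # trailing 1s stand in for padding
        mask = tf.cast(tf.math.not_equal(ids, 1), tf.int8)
        assert mask.numpy().tolist() == [[1, 1, 0, 0]]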
| 284 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__snake_case : Dict =pytest.mark.integration
@require_faiss
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(lowerCAmelCase_ ) for x in np.arange(30 ).tolist()]} )
return dset
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Optional[int] = self._create_dummy_dataset()
lowerCAmelCase__ : Any = dset.map(
lambda __lowerCamelCase ,__lowerCamelCase : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=lowerCAmelCase_ ,keep_in_memory=lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = dset.add_faiss_index('''vecs''' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Tuple = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
lowerCAmelCase__ , lowerCAmelCase__ : str = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Dict = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('''vecs''' ,tmp_file.name )
dset.load_faiss_index('''vecs2''' ,tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase__ , lowerCAmelCase__ : Any = dset.get_nearest_examples('''vecs2''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(lowerCAmelCase_ ,partial(dset.get_nearest_examples ,'''vecs2''' ,np.ones(5 ,dtype=np.floataa ) ) )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase__ : Any = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowerCAmelCase__ : Optional[Any] = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase__ : Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
lowerCAmelCase__ : Tuple = Elasticsearch()
dset.add_elasticsearch_index('''filename''' ,es_client=lowerCAmelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = dset.get_nearest_examples('''filename''' ,'''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
@require_faiss
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
lowerCAmelCase__ : Optional[Any] = np.zeros(5 ,dtype=np.floataa )
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = index.search(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
lowerCAmelCase__ : Optional[Any] = np.eye(5 ,dtype=np.floataa )[::-1]
lowerCAmelCase__ , lowerCAmelCase__ : Any = index.search_batch(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ ,index.search_batch ,queries[0] )
lowerCAmelCase__ : str = [scores[0] for scores in total_scores]
lowerCAmelCase__ : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,lowerCAmelCase_ )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Tuple = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
lowerCAmelCase__ : Union[str, Any] = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase_ ):
lowerCAmelCase__ : List[Any] = FaissIndex(string_factory='''Flat''' ,custom_index=faiss.IndexFlat(5 ) )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
import faiss
lowerCAmelCase__ : str = faiss.IndexFlat(5 )
lowerCAmelCase__ : Dict = FaissIndex(custom_index=lowerCAmelCase_ )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
import faiss
lowerCAmelCase__ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase__ : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase__ : Optional[Any] = np.zeros(5 ,dtype=np.floataa )
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ , lowerCAmelCase__ : str = index.search(lowerCAmelCase_ )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any]):
'''simple docstring'''
import faiss
lowerCAmelCase__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 ,dtype=np.floataa))
lowerCAmelCase__ : Any = '''index.faiss'''
lowerCAmelCase__ : Tuple = f"""mock://{index_name}"""
index.save(lowerCAmelCase_ ,storage_options=mockfs.storage_options)
lowerCAmelCase__ : List[Any] = FaissIndex.load(lowerCAmelCase_ ,storage_options=mockfs.storage_options)
lowerCAmelCase__ : Optional[int] = np.zeros(5 ,dtype=np.floataa)
lowerCAmelCase__ : int = 1
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = index.search(lowerCAmelCase_)
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowerCAmelCase__ : Any = Elasticsearch()
lowerCAmelCase__ : Union[str, Any] = {'''acknowledged''': True}
lowerCAmelCase__ : Optional[Any] = ElasticSearchIndex(es_client=lowerCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
lowerCAmelCase__ : Dict = '''foo'''
lowerCAmelCase__ : Any = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = index.search(lowerCAmelCase_ )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
lowerCAmelCase__ : Optional[Any] = '''foo'''
lowerCAmelCase__ : Tuple = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = index.search(lowerCAmelCase_ ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
lowerCAmelCase__ : int = ['''foo''', '''bar''', '''foobar''']
lowerCAmelCase__ : List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = index.search_batch(lowerCAmelCase_ )
lowerCAmelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCAmelCase__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) ,0 )
self.assertListEqual([1, 1, 1] ,lowerCAmelCase_ )
# batched queries with timeout
lowerCAmelCase__ : Any = ['''foo''', '''bar''', '''foobar''']
lowerCAmelCase__ : Tuple = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowerCAmelCase__ , lowerCAmelCase__ : Any = index.search_batch(lowerCAmelCase_ ,request_timeout=30 )
lowerCAmelCase__ : Dict = [scores[0] for scores in total_scores]
lowerCAmelCase__ : Any = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) ,0 )
self.assertListEqual([1, 1, 1] ,lowerCAmelCase_ )
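

# ---------------------------------------------------------------------------
# Raw-FAISS sketch of what the FaissIndex wrapper exercised above does:
# build an inner-product index, add vectors, run a query. It assumes faiss
# is installed (the tests above are skipped without it).
# ---------------------------------------------------------------------------
def _faiss_smoke_demo() -> None:
    import faiss

    index = faiss.IndexFlatIP(5)  # exhaustive inner-product index, dim 5
    index.add(np.eye(5, dtype=np.float32))  # five one-hot vectors
    scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 3)
    # every one-hot vector has inner product 1.0 with the all-ones query,
    # so each returned score is 1.0
    print(scores, ids)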
| 129 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : Union[str, Any] = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ['LayoutLMv3FeatureExtractor']
_snake_case : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
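

# ---------------------------------------------------------------------------
# Sketch of the idea behind _LazyModule above, reduced to the standard
# library: resolve a heavy submodule the first time an attribute is touched,
# then cache it. _LazyModule itself is transformers-internal, so this is an
# illustration of the pattern, not its implementation.
# ---------------------------------------------------------------------------
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, item):
        # __getattr__ only fires when normal lookup fails, so each submodule
        # is imported at most once and cached via setattr below.
        if item in self._attr_to_module:
            module = importlib.import_module(self._attr_to_module[item])
            setattr(self, item, module)
            return module
        raise AttributeError(item)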
| 284 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__a = random.Random()
def __UpperCAmelCase ( a_: List[Any], a_: List[Any]=1.0, a_: Dict=None, a_: Tuple=None ):
if rng is None:
_UpperCAmelCase : Union[str, Any] = global_rng
_UpperCAmelCase : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple=7 , lowerCAmelCase__ : int=4_0_0 , lowerCAmelCase__ : Any=2_0_0_0 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : List[Any]=1_6_0_0_0 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[Any]=8_0 , lowerCAmelCase__ : List[Any]=1_6 , lowerCAmelCase__ : Tuple=6_4 , lowerCAmelCase__ : Optional[Any]="hann_window" , lowerCAmelCase__ : List[Any]=8_0 , lowerCAmelCase__ : str=7_6_0_0 , lowerCAmelCase__ : Optional[int]=1e-10 , lowerCAmelCase__ : Optional[int]=True , ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Optional[Any] = batch_size
_UpperCAmelCase : str = min_seq_length
_UpperCAmelCase : str = max_seq_length
_UpperCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Union[str, Any] = feature_size
_UpperCAmelCase : List[str] = padding_value
_UpperCAmelCase : List[str] = sampling_rate
_UpperCAmelCase : Any = do_normalize
_UpperCAmelCase : str = num_mel_bins
_UpperCAmelCase : Tuple = hop_length
_UpperCAmelCase : Tuple = win_length
_UpperCAmelCase : List[str] = win_function
_UpperCAmelCase : Optional[int] = fmin
_UpperCAmelCase : Dict = fmax
_UpperCAmelCase : Dict = mel_floor
_UpperCAmelCase : List[str] = return_attention_mask
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
def _flatten(lowerCAmelCase__ : Union[str, Any] ):
return list(itertools.chain(*lowerCAmelCase_ ) )
if equal_length:
_UpperCAmelCase : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_UpperCAmelCase : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : Tuple = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs]
return speech_inputs
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if equal_length:
_UpperCAmelCase : List[Any] = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : str = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : str = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs]
return speech_inputs
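

# ---------------------------------------------------------------------------
# Standalone sketch of the property that _check_zero_mean_unit_variance in
# the test class below asserts. The 1e-7 epsilon is an assumption of this
# sketch, not necessarily the extractor's exact constant.
# ---------------------------------------------------------------------------
def _zero_mean_unit_var_demo() -> None:
    rng = np.random.default_rng(0)
    x = rng.normal(loc=3.0, scale=2.0, size=800).astype(np.float32)
    normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    assert abs(float(normed.mean())) < 1e-3
    assert abs(float(normed.var()) - 1.0) < 1e-3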
@require_torch
class A__ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = SpeechTaFeatureExtractor
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : List[str] ) -> Any:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : Optional[Any] = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : Tuple = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_UpperCAmelCase : Any = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
# Test batched
_UpperCAmelCase : str = feat_extract(lowerCAmelCase_ , return_tensors="np" ).input_values
_UpperCAmelCase : int = feat_extract(lowerCAmelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : List[str] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : str = [None, 1_6_0_0, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors="np" )
_UpperCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Dict = range(8_0_0 , 1_4_0_0 , 2_0_0 )
_UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in lengths]
_UpperCAmelCase : Optional[int] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : Tuple = [None, 1_6_0_0, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[Any] = feat_extract(lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ )
_UpperCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : Union[str, Any] = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
_UpperCAmelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : int = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest, outputs are padded/truncated to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
_UpperCAmelCase : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : Dict = feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest, outputs are only padded up to the longest input
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Tuple = np.random.rand(1_0_0 ).astype(np.floataa )
_UpperCAmelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Optional[int] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCAmelCase : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_UpperCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : Dict = feature_extractor(audio_target=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_UpperCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
_UpperCAmelCase : int = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
# Test batched
_UpperCAmelCase : Optional[int] = feature_extractor(lowerCAmelCase_ , return_tensors="np" ).input_values
_UpperCAmelCase : Optional[Any] = feature_extractor(lowerCAmelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_UpperCAmelCase : int = np.asarray(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = feature_extractor(lowerCAmelCase_ , return_tensors="np" ).input_values
_UpperCAmelCase : Union[str, Any] = feature_extractor(lowerCAmelCase_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase : Union[str, Any] = feat_extract.model_input_names[0]
_UpperCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) )
_UpperCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ )
_UpperCAmelCase : int = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
_UpperCAmelCase : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_UpperCAmelCase : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase : List[Any] = feat_extract.model_input_names[0]
_UpperCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
_UpperCAmelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_UpperCAmelCase : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase : Optional[int] = feat_extract.model_input_names[0]
_UpperCAmelCase : Dict = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase : Any = feat_extract.num_mel_bins # hack!
_UpperCAmelCase : Optional[Any] = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )[input_name]
_UpperCAmelCase : Dict = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.feat_extract_dict
_UpperCAmelCase : str = True
_UpperCAmelCase : Any = self.feature_extraction_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase : List[Any] = [len(lowerCAmelCase_ ) for x in speech_inputs]
_UpperCAmelCase : Tuple = feat_extract.model_input_names[0]
_UpperCAmelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase : List[Any] = feat_extract.num_mel_bins # hack!
_UpperCAmelCase : Dict = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : int = self.feat_extract_dict
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**lowerCAmelCase_ )
_UpperCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
_UpperCAmelCase : str = [len(lowerCAmelCase_ ) for x in speech_inputs]
_UpperCAmelCase : int = feat_extract.model_input_names[0]
_UpperCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase : str = min(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = feat_extract.num_mel_bins # hack!
_UpperCAmelCase : Optional[int] = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
from datasets import load_dataset
_UpperCAmelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        # fmt: off
        _UpperCAmelCase : List[str] = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
_UpperCAmelCase : List[Any] = self._load_datasamples(1 )
_UpperCAmelCase : Optional[int] = SpeechTaFeatureExtractor()
_UpperCAmelCase : Any = feature_extractor(lowerCAmelCase_ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , lowerCAmelCase_ , atol=1e-6 ) )
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
        # fmt: off
        _UpperCAmelCase : Dict = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
_UpperCAmelCase : List[str] = self._load_datasamples(1 )
_UpperCAmelCase : Optional[Any] = SpeechTaFeatureExtractor()
_UpperCAmelCase : Tuple = feature_extractor(audio_target=lowerCAmelCase_ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCAmelCase_ , atol=1e-4 ) )
| 145 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case : Dict = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
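

# ---------------------------------------------------------------------------
# Sketch of the optional-dependency probe behind is_torch_available() above,
# reduced to the standard library. The real helper also honors environment
# flags, so treat this as a simplification, not a drop-in replacement.
# ---------------------------------------------------------------------------
import importlib.util


def _is_installed(package: str) -> bool:
    return importlib.util.find_spec(package) is not None


# e.g. the torch-only half of the import structure above is only meaningful
# when _is_installed("torch") is True.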
| 284 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = np.random.default_rng(lowerCAmelCase_ )
UpperCAmelCase : Any = length
UpperCAmelCase : List[Any] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase : Optional[Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return self.length
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class SCREAMING_SNAKE_CASE__ ( torch.nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False ) -> Dict:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase : Union[str, Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase : int = True
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
'''simple docstring'''
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase : List[str] = False
return x * self.a[0] + self.b[0]
class SCREAMING_SNAKE_CASE__ ( torch.nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Optional[int] = torch.nn.Parameter(torch.tensor(lowerCAmelCase_ ).float() )
UpperCAmelCase : Any = torch.nn.Parameter(torch.tensor(lowerCAmelCase_ ).float() )
UpperCAmelCase : Union[str, Any] = True
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
UpperCAmelCase : Dict = False
return x * self.a + self.b
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : int = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCAmelCase : Union[str, Any] = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
UpperCAmelCase : Optional[int] = load_dataset("""csv""" , data_files=lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = datasets["""train"""].unique("""label""" )
UpperCAmelCase : Any = {v: i for i, v in enumerate(lowerCAmelCase_ )}
def tokenize_function(UpperCamelCase : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase : Union[str, Any] = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" )
if "label" in examples:
UpperCAmelCase : Optional[Any] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase : Dict = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(UpperCamelCase : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCAmelCase : Any = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=2 )
UpperCAmelCase : Optional[int] = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=1 )
return train_dataloader, eval_dataloader
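

# ---------------------------------------------------------------------------
# Toy fit of the y = a*x + b regression set up above, in plain torch (no
# accelerate). The step count and learning rate are arbitrary; the point is
# that the learned (a, b) drift toward the generating values (2, 3).
# ---------------------------------------------------------------------------
def _fit_regression_demo(steps: int = 200, lr: float = 0.1) -> None:
    torch.manual_seed(0)
    x = torch.randn(64)
    y = 2.0 * x + 3.0  # same a=2, b=3 as the dataset defaults above
    a = torch.zeros(1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    optimizer = torch.optim.SGD([a, b], lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        loss = ((a * x + b - y) ** 2).mean()
        loss.backward()
        optimizer.step()
    print(a.item(), b.item())  # approximately 2.0 and 3.0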
| 109 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_snake_case : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_snake_case : list[int] = [ord(letter) for letter in string.ascii_lowercase]
_snake_case : set[int] = {ord(char) for char in VALID_CHARS}
_snake_case : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( lowerCAmelCase_ : list[int], lowerCAmelCase_ : tuple[int, ...] ):
__lowerCAmelCase = ""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
for keychar, cipherchar in zip(cycle(lowerCAmelCase_ ), lowerCAmelCase_ ):
__lowerCAmelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCAmelCase_ )
return decoded
def a_ ( lowerCAmelCase_ : list[int] ):
__lowerCAmelCase = []
for key in product(lowerCAmelCase_, repeat=3 ):
__lowerCAmelCase = try_key(lowerCAmelCase_, lowerCAmelCase_ )
if encoded is not None:
possibles.append(lowerCAmelCase_ )
return possibles
def a_ ( lowerCAmelCase_ : list[str], lowerCAmelCase_ : str ):
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCAmelCase_ : str = "p059_cipher.txt" ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='utf-8' )
__lowerCAmelCase = [int(lowerCAmelCase_ ) for number in data.strip().split(',' )]
__lowerCAmelCase = filter_valid_chars(lowerCAmelCase_ )
for common_word in COMMON_WORDS:
__lowerCAmelCase = filter_common_word(lowerCAmelCase_, lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 1:
break
__lowerCAmelCase = possibles[0]
return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
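

# ---------------------------------------------------------------------------
# Round-trip check for the XOR scheme the solver above brute-forces:
# applying the same cycled key twice returns the plaintext, which is why one
# xor pass serves as both encryption and decryption. The key is arbitrary.
# ---------------------------------------------------------------------------
def _xor_roundtrip_demo() -> None:
    key = (ord("a"), ord("b"), ord("c"))
    plain = [ord(ch) for ch in "hello world"]
    cipher = [c ^ k for c, k in zip(plain, cycle(key))]
    assert [c ^ k for c, k in zip(cipher, cycle(key))] == plain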
if __name__ == "__main__":
print(F"""{solution() = }""")
| 284 | 0 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_="shi-labs/oneformer_demo" ):
with open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) as f:
UpperCAmelCase : Any = json.load(lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = {}
UpperCAmelCase : str = []
UpperCAmelCase : int = []
for key, info in class_info.items():
UpperCAmelCase : Tuple = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase_ ) )
UpperCAmelCase : int = thing_ids
UpperCAmelCase : int = class_names
return metadata
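# For reference, the class-info JSON read above is assumed to map class ids to
# entries carrying at least "name" and "isthing" keys, in the ADE20K-panoptic style
# (hypothetical entries):
# {"0": {"name": "wall", "isthing": 0}, "7": {"name": "bed", "isthing": 1}}
# prepare_metadata() then exposes the per-id names, the full class-name list, and
# the ids of "thing" (instance) classes.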
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : int , lowercase_ : Tuple=7 , lowercase_ : List[str]=3 , lowercase_ : Optional[Any]=30 , lowercase_ : Optional[Any]=400 , lowercase_ : List[str]=None , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=True , lowercase_ : List[str]=[0.5, 0.5, 0.5] , lowercase_ : Dict=[0.5, 0.5, 0.5] , lowercase_ : Optional[Any]=10 , lowercase_ : Optional[int]=False , lowercase_ : Optional[Any]=255 , lowercase_ : List[str]="shi-labs/oneformer_demo" , lowercase_ : Tuple="ade20k_panoptic.json" , lowercase_ : List[Any]=10 , ) -> Dict:
UpperCAmelCase : Dict = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Dict = num_channels
UpperCAmelCase : Optional[int] = min_resolution
UpperCAmelCase : Any = max_resolution
UpperCAmelCase : List[Any] = do_resize
UpperCAmelCase : List[Any] = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size
UpperCAmelCase : str = do_normalize
UpperCAmelCase : Any = image_mean
UpperCAmelCase : Optional[Any] = image_std
UpperCAmelCase : Optional[Any] = class_info_file
UpperCAmelCase : Any = prepare_metadata(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase : List[Any] = num_text
UpperCAmelCase : str = repo_path
# for the post_process_functions
UpperCAmelCase : List[str] = 2
UpperCAmelCase : str = 10
UpperCAmelCase : List[Any] = 10
UpperCAmelCase : Union[str, Any] = 3
UpperCAmelCase : Any = 4
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Tuple = do_reduce_labels
UpperCAmelCase : Union[str, Any] = ignore_index
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any]=False ) -> List[str]:
if not batched:
UpperCAmelCase : Optional[int] = image_inputs[0]
if isinstance(lowerCAmelCase_ , Image.Image ):
UpperCAmelCase , UpperCAmelCase : int = image.size
else:
UpperCAmelCase , UpperCAmelCase : List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase : Any = int(self.size['shortest_edge'] * h / w )
UpperCAmelCase : List[str] = self.size['shortest_edge']
elif w > h:
UpperCAmelCase : List[str] = self.size['shortest_edge']
UpperCAmelCase : Optional[Any] = int(self.size['shortest_edge'] * w / h )
else:
UpperCAmelCase : List[str] = self.size['shortest_edge']
UpperCAmelCase : Dict = self.size['shortest_edge']
else:
UpperCAmelCase : str = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase : Any = max(lowerCAmelCase_ , key=lambda lowercase_ : item[0] )[0]
UpperCAmelCase : int = max(lowerCAmelCase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
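# Worked example of the aspect-ratio resize above (sizes are illustrative): with
# shortest_edge = 32 and a 400x30 (w x h) image, w > h, so the height is pinned to
# 32 and the width becomes int(32 * 400 / 30) = 426; the shorter side matches
# shortest_edge while the aspect ratio is preserved.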
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class A_ ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCAmelCase_ : int = image_processing_class
def UpperCAmelCase_ ( self : int ) -> List[Any]:
UpperCAmelCase : Tuple = OneFormerImageProcessorTester(self )
@property
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'image_std' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'ignore_index' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'class_info_file' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'num_text' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'repo_path' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'metadata' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_reduce_labels' ) )
def UpperCAmelCase_ ( self : str ) -> Tuple:
pass
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Dict = self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : Dict = self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
UpperCAmelCase : Dict = image_processor(
lowerCAmelCase_ , ['semantic'] * len(lowerCAmelCase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
# Initialize image_processor
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = image_processor(
lowerCAmelCase_ , ['semantic'] * len(lowerCAmelCase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Any ) -> Dict:
# Initialize image_processor
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : List[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(lowerCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : List[str] = self.image_processing_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ )
UpperCAmelCase : str = image_processor(
lowerCAmelCase_ , ['semantic'] * len(lowerCAmelCase_ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Union[str, Any]=False , lowercase_ : int=False , lowercase_ : str="np" ) -> List[str]:
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase : Dict = self.image_processing_tester.num_labels
UpperCAmelCase : Any = None
UpperCAmelCase : int = None
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase_ )
if with_segmentation_maps:
UpperCAmelCase : List[Any] = num_labels
if is_instance_map:
UpperCAmelCase : List[str] = list(range(lowerCAmelCase_ ) ) * 2
UpperCAmelCase : Optional[Any] = dict(enumerate(lowerCAmelCase_ ) )
UpperCAmelCase : Any = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase : Any = [Image.fromarray(lowerCAmelCase_ ) for annotation in annotations]
UpperCAmelCase : int = image_processor(
lowerCAmelCase_ , ['semantic'] * len(lowerCAmelCase_ ) , lowerCAmelCase_ , return_tensors='pt' , instance_id_to_semantic_id=lowerCAmelCase_ , pad_and_return_pixel_mask=lowerCAmelCase_ , )
return inputs
def UpperCAmelCase_ ( self : str ) -> Tuple:
pass
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
def common(lowercase_ : List[Any]=False , lowercase_ : Union[str, Any]=None ):
UpperCAmelCase : Optional[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCAmelCase_ , is_instance_map=lowerCAmelCase_ , segmentation_type=lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = inputs['mask_labels']
UpperCAmelCase : List[Any] = inputs['class_labels']
UpperCAmelCase : Tuple = inputs['pixel_values']
UpperCAmelCase : List[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures that padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCAmelCase_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCAmelCase_ )
common(is_instance_map=lowerCAmelCase_ , segmentation_type='pil' )
common(is_instance_map=lowerCAmelCase_ , segmentation_type='pil' )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : List[str] = np.zeros((20, 50) )
UpperCAmelCase : Dict = 1
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : int = 1
UpperCAmelCase : Any = binary_mask_to_rle(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
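# Sketch of the run-length convention the asserts above imply (inferred from the
# expected values, not from the library internals): the flattened binary mask is
# encoded as alternating run lengths starting with the count of leading zeros,
# e.g. [0, 0, 1, 1, 1, 0] -> [2, 3, 1], i.e.
# [len(list(g)) for _, g in itertools.groupby(flat)].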
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Dict = image_processor.post_process_semantic_segmentation(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(lowerCAmelCase_ , target_sizes=lowerCAmelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : int = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Optional[int] = image_processor.post_process_instance_segmentation(lowerCAmelCase_ , threshold=0 )
self.assertTrue(len(lowerCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , lowerCAmelCase_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Any = image_processor.post_process_panoptic_segmentation(lowerCAmelCase_ , threshold=0 )
self.assertTrue(len(lowerCAmelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , lowerCAmelCase_ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 151 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_snake_case : Tuple = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_snake_case : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( lowerCAmelCase_ : str ):
if "://" in dataset_path:
__lowerCAmelCase = dataset_path.split('://' )[1]
return dataset_path
def a_ ( lowerCAmelCase_ : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
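# Quick sanity checks for the two helpers above (illustrative; in the datasets
# library they are exported as extract_path_from_uri and is_remote_filesystem, and
# the bucket name below is made up):
assert extract_path_from_uri('s3://my-bucket/data') == 'my-bucket/data'
assert extract_path_from_uri('./local/path') == './local/path'
assert is_remote_filesystem(fsspec.filesystem('file')) is False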
def a_ ( lowerCAmelCase_ : fsspec.AbstractFileSystem, lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
__lowerCAmelCase = not is_remote_filesystem(lowerCAmelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase_ ), fs._strip_protocol(lowerCAmelCase_ ) )
else:
fs.mv(lowerCAmelCase_, lowerCAmelCase_, recursive=lowerCAmelCase_ )
def a_ ( ):
if hasattr(fsspec.asyn, 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = threading.Lock()
| 284 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__lowercase : Tuple = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__lowercase : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase_ ( _lowercase ) -> Any:
'''simple docstring'''
if "://" in dataset_path:
lowerCamelCase_ : int = dataset_path.split('''://''' )[1]
return dataset_path
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Tuple = not is_remote_filesystem(lowerCAmelCase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase_ ) , fs._strip_protocol(lowerCAmelCase_ ) )
else:
fs.mv(lowerCAmelCase_ , lowerCAmelCase_ , recursive=lowerCAmelCase_ )
def lowercase_ ( ) -> str:
'''simple docstring'''
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Optional[Any] = threading.Lock()
| 318 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = filter(lambda lowerCAmelCase_ : p.requires_grad, model.parameters() )
__lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case : Dict = logging.getLogger(__name__)
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[int] ):
if metric == "rouge2":
__lowerCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__lowerCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__lowerCAmelCase = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"""
' function.' )
__lowerCAmelCase = ModelCheckpoint(
dirpath=lowerCAmelCase_, filename=lowerCAmelCase_, monitor=F"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
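# Example of the filename templates above (numbers are made up): with
# val_avg_rouge2 = 0.2134 and step_count = 500, ModelCheckpoint writes a file named
# "0.2134-500.ckpt" under dirpath, keeping the top save_top_k=3 checkpoints by the
# monitored metric.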
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Any ):
return EarlyStopping(
monitor=F"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=lowerCAmelCase_, verbose=lowerCAmelCase_, )
class _UpperCAmelCase ( pl.Callback ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int ) -> Any:
__lowerCAmelCase = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Optional[int] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowerCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__lowerCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCAmelCase = od / 'test_results.txt'
__lowerCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'a+' ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCAmelCase = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
__lowerCAmelCase = val.item()
__lowerCAmelCase = f"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
__lowerCAmelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Dict:
try:
__lowerCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCAmelCase = pl_module.model.num_parameters()
__lowerCAmelCase = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase ( self : int , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , 'test' )
@rank_zero_only
def lowercase ( self : List[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : Any ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 284 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict=0 ):
# Format the message.
if name is None:
__a : List[str] = None
else:
__a : Union[str, Any] = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(5_0 - spaces ) + '''s}'''
__a : Any = fmt.format(lowerCAmelCase_ )
# Print and recurse (if needed).
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if msg is not None:
print(lowerCAmelCase_ )
for k in val.keys():
recursive_print(lowerCAmelCase_ , val[k] , spaces + 2 )
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
print(lowerCAmelCase_ , ''':''' , val.size() )
else:
print(lowerCAmelCase_ , ''':''' , lowerCAmelCase_ )
def __UpperCamelCase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__a : int = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__a : str = (num_heads, hidden_size, num_splits) + input_shape[1:]
__a : List[Any] = param.view(*lowerCAmelCase_ )
__a : Any = param.transpose(0 , 2 )
__a : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__a : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:]
__a : int = param.view(*lowerCAmelCase_ )
__a : str = param.transpose(0 , 1 ).contiguous()
__a : Union[str, Any] = param.view(*lowerCAmelCase_ )
return param
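# Standalone shape check for the checkpoint_version >= 2.0 branch above (all sizes
# are made up): a fused QKV weight stored as [num_heads * num_splits * hidden_size, :]
# keeps its flat shape, but the head/split axes are swapped so the three Q/K/V
# splits become the leading grouping.
demo_param = torch.zeros(16 * 3 * 64, 1024)
demo_out = demo_param.view(16, 3, 64, 1024).transpose(0, 1).contiguous().view(3 * 16 * 64, 1024)
assert demo_out.shape == demo_param.shape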
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] ):
# The converted output model.
__a : Optional[Any] = {}
# old versions did not store training args
__a : Any = input_state_dict.get('''args''' , lowerCAmelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__a : str = ds_args.padded_vocab_size
__a : Any = ds_args.max_position_embeddings
__a : Any = ds_args.hidden_size
__a : str = ds_args.num_layers
__a : Any = ds_args.num_attention_heads
__a : Any = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__a : Union[str, Any] = config.n_head
# The hidden_size per head.
__a : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__a : Optional[Any] = input_state_dict['''checkpoint_version''']
else:
__a : Optional[int] = 0.0
# The model.
__a : Tuple = input_state_dict['''model''']
# The language model.
__a : Union[str, Any] = model['''language_model''']
# The embeddings.
__a : Dict = lm['''embedding''']
# The word embeddings.
__a : Optional[Any] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
__a : Optional[int] = word_embeddings[: config.vocab_size, :]
__a : Tuple = word_embeddings
# The position embeddings.
__a : Union[str, Any] = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__a : List[Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
__a : Optional[int] = pos_embeddings
# The transformer.
__a : Dict = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
__a : List[str] = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
__a : Optional[Any] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__a : Union[str, Any] = layer_re.match(lowerCAmelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__a : int = int(m.group(1 ) )
# The name of the operation.
__a : int = m.group(2 )
# Is it a weight or a bias?
__a : Any = m.group(3 )
# The name of the layer.
__a : List[str] = f"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
__a : Union[str, Any] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
__a : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__a : Union[str, Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCAmelCase_ , lowerCAmelCase_ )
__a : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
__a : Any = torch.tensor(-1e4 , dtype=torch.floataa )
__a : Any = masked_bias
__a : Union[str, Any] = fix_query_key_value_ordering(lowerCAmelCase_ , lowerCAmelCase_ , 3 , lowerCAmelCase_ , lowerCAmelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__a : List[str] = out_val.transpose(0 , 1 ).contiguous()
# Store.
__a : Optional[int] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__a : Any = fix_query_key_value_ordering(lowerCAmelCase_ , lowerCAmelCase_ , 3 , lowerCAmelCase_ , lowerCAmelCase_ )
# Store. No change of shape.
__a : Optional[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__a : Tuple = megatron_to_transformers[op_name]
__a : Optional[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__a : Any = megatron_to_transformers[op_name]
__a : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__a : Dict = transformer['''final_layernorm.weight''']
__a : Dict = transformer['''final_layernorm.bias''']
# For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
__a : Union[str, Any] = word_embeddings
# It should be done!
return output_state_dict
def __UpperCamelCase ( ):
# Create the argument parser.
__a : Any = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=lowerCAmelCase_ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=lowerCAmelCase_ , help='''An optional config json file describing the pre-trained model.''' , )
__a : str = parser.parse_args()
# Extract the basename.
__a : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
__a : Optional[int] = torch.load(lowerCAmelCase_ , map_location='''cpu''' )
else:
__a : Dict = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
__a : List[Any] = input_state_dict.get('''args''' , lowerCAmelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__a : Optional[int] = '''gelu_fast'''
elif ds_args.openai_gelu:
__a : Dict = '''gelu_new'''
else:
__a : Any = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
__a : Union[str, Any] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
__a : Optional[int] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=lowerCAmelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=lowerCAmelCase_ , summary_activation=lowerCAmelCase_ , summary_proj_to_labels=lowerCAmelCase_ , summary_first_dropout=0.1 , scale_attn_weights=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
__a : Any = GPTaConfig.from_json_file(args.config_file )
__a : Tuple = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
__a : List[str] = convert_megatron_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCAmelCase_ , lowerCAmelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
__a : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__a : List[str] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
__a : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
else:
__a : Optional[int] = '''gpt2'''
__a : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__a : Any = type(lowerCAmelCase_ ).__name__
__a : Tuple = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(lowerCAmelCase_ )
# Save tokenizer based on args
print(f"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(lowerCAmelCase_ )
# Store the state_dict to file.
__a : List[str] = os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' )
print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(lowerCAmelCase_ , lowerCAmelCase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 216 |
import re
from filelock import FileLock
try:
import nltk
_snake_case : Any = True
except (ImportError, ModuleNotFoundError):
_snake_case : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def a_ ( lowerCAmelCase_ : str ):
lowerCAmelCase_ = re.sub('<n>', '', lowerCAmelCase_ ) # remove pegasus newline char (assign the result back; re.sub does not modify in place)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
| 284 | 0 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
while b:
UpperCAmelCase__ , UpperCAmelCase__ = b, a % b
return a
def a_ ( lowerCamelCase , lowerCamelCase ):
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase_ , a % b )
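# Worked trace of the Euclidean recurrence implemented above:
# gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6,
# since each step replaces (a, b) with (b, a % b).
demo_a, demo_b = 48, 18
while demo_b:
    demo_a, demo_b = demo_b, demo_a % demo_b
assert demo_a == 6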
def a_ ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case : List[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase__ = {ord(char) for char in VALID_CHARS}
UpperCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : str = ''''''
UpperCAmelCase__ : Union[str, Any] = 42
UpperCAmelCase__ : Any = 42
UpperCAmelCase__ : Optional[int] = 42
for keychar, cipherchar in zip(cycle(lowerCAmelCase_ ) , lowerCAmelCase_ ):
UpperCAmelCase__ : Tuple = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCAmelCase_ )
return decoded
def a__ ( lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : Dict = []
for key in product(lowerCAmelCase_ , repeat=3 ):
UpperCAmelCase__ : Dict = try_key(lowerCAmelCase_ , lowerCAmelCase_ )
if encoded is not None:
possibles.append(lowerCAmelCase_ )
return possibles
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
return [possible for possible in possibles if common_word in possible.lower()]
def a__ ( lowerCAmelCase__ = "p059_cipher.txt" ) -> Optional[int]:
UpperCAmelCase__ : int = 42
UpperCAmelCase__ : Optional[Any] = 42
UpperCAmelCase__ : Optional[int] = 42
UpperCAmelCase__ : List[str] = 42
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='''utf-8''' )
UpperCAmelCase__ : Optional[int] = [int(lowerCAmelCase_ ) for number in data.strip().split(''',''' )]
UpperCAmelCase__ : Tuple = filter_valid_chars(lowerCAmelCase_ )
for common_word in COMMON_WORDS:
UpperCAmelCase__ : int = filter_common_word(lowerCAmelCase_ , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 1:
break
UpperCAmelCase__ : List[Any] = possibles[0]
return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_snake_case : Tuple = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_snake_case : str = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_snake_case : List[str] = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : str ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def lowercase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> List[Any]:
__lowerCAmelCase = 0.0
for i, j in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase_ , lowerCAmelCase_ ) else 0.0
__lowerCAmelCase = n_correct / len(lowerCAmelCase_ )
return {
"accuracy": accuracy,
}
| 284 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _a ( ):
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _a ( ):
"""simple docstring"""
lowercase__ = '''mock-s3-bucket'''
lowercase__ = f's3://{mock_bucket}'
lowercase__ = extract_path_from_uri(lowerCAmelCase_ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase__ = '''./local/path'''
lowercase__ = extract_path_from_uri(lowerCAmelCase_ )
assert dataset_path == new_dataset_path
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = is_remote_filesystem(lowerCAmelCase_ )
assert is_remote is True
lowercase__ = fsspec.filesystem('''file''' )
lowercase__ = is_remote_filesystem(lowerCAmelCase_ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , lowerCAmelCase_ )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase__ = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase__ = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase_ )
lowercase__ = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase__ = os.path.basename(lowerCAmelCase_ )
lowercase__ = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as f, open(lowerCAmelCase_ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
lowercase__ = compressed_file_paths[protocol]
lowercase__ = '''dataset.jsonl'''
lowercase__ = f'{protocol}://{member_file_path}::{compressed_file_path}'
lowercase__ , *lowercase__ = fsspec.get_fs_token_paths(lowerCAmelCase_ )
assert fs.isfile(lowerCAmelCase_ )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = hf_api.dataset_info(lowerCAmelCase_ , token=lowerCAmelCase_ )
lowercase__ = HfFileSystem(repo_info=lowerCAmelCase_ , token=lowerCAmelCase_ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(lowerCAmelCase_ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def _a ( ):
"""simple docstring"""
lowercase__ = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCAmelCase_ , lowerCAmelCase_ , clobber=lowerCAmelCase_ )
with pytest.warns(lowerCAmelCase_ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCAmelCase_ ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(lowerCAmelCase_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' )
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = 'french fries'
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = [inputs['prompt']] * 2
__lowerCAmelCase = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
__lowerCAmelCase = image / 2 + 0.5
__lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
__lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = [round(lowerCAmelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCAmelCase_ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0]
__lowerCAmelCase = components['vae']
__lowerCAmelCase = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )[0]
__lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any]=0 ) -> Any:
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__lowerCAmelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : List[Any] ) -> str:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Dict:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = 0
def callback_fn(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor ) -> None:
__lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Optional[int] ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase ( self : List[Any] ) -> Any:
__lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCAmelCase = inputs['image'].resize((5_0_4, 5_0_4) )
__lowerCAmelCase = 'timbrooks/instruct-pix2pix'
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__lowerCAmelCase = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 284 | 0 |
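The callback test above relies on diffusers' per-step hook. A minimal standalone sketch of the same pattern, assuming an older diffusers release that still accepts the legacy callback/callback_steps arguments, a CUDA device, and network access to the checkpoint and example image:

# Sketch: inspect intermediate latents via the legacy diffusers callback hook.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

def log_latents(step: int, timestep: int, latents: torch.FloatTensor) -> None:
    # latents is the UNet's working state, shaped (batch, 4, H // 8, W // 8)
    print(f"step {step}: latents mean={latents.mean().item():.4f}")

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
out = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=3,
    callback=log_latents,
    callback_steps=1,
).images[0]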
UpperCamelCase = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 306 |
from timeit import timeit
def a_ ( lowerCAmelCase_ : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__lowerCAmelCase = 0
while number:
number &= number - 1
result += 1
return result
def a_ ( lowerCAmelCase_ : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__lowerCAmelCase = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a_ ( ):
def do_benchmark(lowerCAmelCase_ : int ) -> None:
__lowerCAmelCase = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(lowerCAmelCase_ ) = }""" )
        __lowerCAmelCase = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""", setup=lowerCAmelCase_ )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase_ ) = }""" )
__lowerCAmelCase = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=lowerCAmelCase_, )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 284 | 0 |
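For reference, a cleaned-up, runnable version of the two set-bit counters benchmarked above; the final assertion assumes Python 3.10+ for the built-in int.bit_count():

def popcount_kernighan(n: int) -> int:
    """Brian Kernighan's trick: each n &= n - 1 clears the lowest set bit,
    so the loop runs once per set bit."""
    if n < 0:
        raise ValueError("n must be non-negative")
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

def popcount_modulo(n: int) -> int:
    """Check the low bit, then shift right; runs once per bit of n."""
    if n < 0:
        raise ValueError("n must be non-negative")
    count = 0
    while n:
        count += n % 2
        n >>= 1
    return count

# 25 == 0b11001, so all three counters agree on 3 set bits.
assert popcount_kernighan(25) == popcount_modulo(25) == (25).bit_count() == 3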
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase__ : List[str] = deprecated_arg[3:]
lowercase__ : Optional[int] = not kwargs.pop(lowerCAmelCase_ )
logger.warning(
                f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase__ : List[Any] = kwargs.pop("tpu_name" , self.tpu_name )
lowercase__ : Optional[int] = kwargs.pop("device_idx" , self.device_idx )
lowercase__ : List[str] = kwargs.pop("eager_mode" , self.eager_mode )
lowercase__ : Any = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**lowerCAmelCase_ )
lowercase_ = field(
default=_UpperCamelCase , metadata={"""help""": """Name of TPU"""} , )
lowercase_ = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """Benchmark models in eager model."""} )
lowercase_ = field(
default=_UpperCamelCase , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def snake_case ( self : int ):
requires_backends(self , ["tf"] )
lowercase__ : Any = None
if self.tpu:
try:
if self.tpu_name:
lowercase__ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase__ : str = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase__ : Tuple = None
return tpu
@cached_property
def snake_case ( self : str ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase__ : str = tf.distribute.TPUStrategy(self._setup_tpu )
else:
            # multi-GPU execution is not currently supported
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
lowercase__ : Optional[int] = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
lowercase__ : int = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def snake_case ( self : Tuple ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def snake_case ( self : Optional[Any] ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def snake_case ( self : Any ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def snake_case ( self : Union[str, Any] ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def snake_case ( self : List[Any] ):
return self.n_gpu > 0
| 130 |
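A minimal sketch of the device-selection logic those benchmark arguments encode, assuming TensorFlow 2.x is installed; the tiny Keras model is only there to show the strategy scope in use:

import tensorflow as tf

def pick_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    # Pin a single visible GPU when one exists, otherwise fall back to CPU;
    # this mirrors the OneDeviceStrategy selection in the benchmark args above.
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        tf.config.set_visible_devices(gpus[device_idx], "GPU")
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # make sure no GPU is used
    return tf.distribute.OneDeviceStrategy(device=f"/cpu:{device_idx}")

strategy = pick_strategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])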
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case : Dict = pytest.mark.integration
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : List[Any] ) -> Optional[Any]:
        __lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase ( self : List[str] ) -> Tuple:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
__lowerCAmelCase = dset.map(
lambda lowerCAmelCase_ , lowerCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
__lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase ( self : Optional[Any] ) -> str:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : int ) -> Optional[Any]:
import faiss
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : str ) -> int:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search_batch , queries[0] )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> List[str]:
import faiss
__lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase_ ):
__lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase ( self : Union[str, Any] ) -> Dict:
import faiss
__lowerCAmelCase = faiss.IndexFlat(5 )
__lowerCAmelCase = FaissIndex(custom_index=lowerCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase ( self : str ) -> Any:
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
__lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
import faiss
__lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__lowerCAmelCase = 'index.faiss'
__lowerCAmelCase = F"""mock://{index_name}"""
index.save(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = FaissIndex.load(lowerCAmelCase_, storage_options=mockfs.storage_options )
__lowerCAmelCase = np.zeros(5, dtype=np.floataa )
__lowerCAmelCase = 1
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__lowerCAmelCase = Elasticsearch()
__lowerCAmelCase = {'acknowledged': True}
__lowerCAmelCase = ElasticSearchIndex(es_client=lowerCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__lowerCAmelCase = 'foo'
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search(lowerCAmelCase_ , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__lowerCAmelCase = ['foo', 'bar', 'foobar']
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
# batched queries with timeout
__lowerCAmelCase = ['foo', 'bar', 'foobar']
__lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__lowerCAmelCase , __lowerCAmelCase = index.search_batch(lowerCAmelCase_ , request_timeout=3_0 )
__lowerCAmelCase = [scores[0] for scores in total_scores]
__lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
| 284 | 0 |
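Outside the test harness, the FAISS pattern above reduces to a few lines. A sketch assuming faiss-cpu and numpy are installed; IndexFlatIP is the inner-product flat index the tests select via METRIC_INNER_PRODUCT:

import faiss
import numpy as np

d = 5
index = faiss.IndexFlatIP(d)  # inner-product metric, no training required
index.add(np.eye(d, dtype=np.float32))  # one one-hot vector per row
scores, ids = index.search(np.ones((1, d), dtype=np.float32), k=3)
print(scores[0], ids[0])  # the inner product with every basis vector is 1.0 here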
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase__ ( _UpperCamelCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ ,'''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ ,'''depth_multiplier''' ) )
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=3 ,__lowerCamelCase=32 ,__lowerCamelCase=0.25 ,__lowerCamelCase=8 ,__lowerCamelCase=True ,__lowerCamelCase=10_24 ,__lowerCamelCase=32 ,__lowerCamelCase="relu6" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.02 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=10 ,__lowerCamelCase=None ,) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : List[Any] = num_channels
lowerCAmelCase__ : str = image_size
lowerCAmelCase__ : Any = depth_multiplier
lowerCAmelCase__ : int = min_depth
lowerCAmelCase__ : Dict = tf_padding
lowerCAmelCase__ : Tuple = int(last_hidden_size * depth_multiplier )
lowerCAmelCase__ : Tuple = output_stride
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Optional[Any] = classifier_dropout_prob
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : Any = num_labels
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : Union[str, Any] = scope
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : int = None
lowerCAmelCase__ : List[str] = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] ,self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowerCAmelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = MobileNetVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase__ : List[str] = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = self.num_labels
lowerCAmelCase__ : Tuple = MobileNetVaForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
lowerCAmelCase__ : Any = model(lowerCAmelCase_ ,labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = config_and_inputs
lowerCAmelCase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
snake_case_ =(MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
snake_case_ =(
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =False
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : int = MobileNetVaModelTester(self )
lowerCAmelCase__ : Optional[Any] = MobileNetVaConfigTester(self ,config_class=lowerCAmelCase_ ,has_text_modality=lowerCAmelCase_ )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(lowerCAmelCase_ )
lowerCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowerCAmelCase_ )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Optional[Any] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(**self._prepare_for_class(lowerCAmelCase_ ,lowerCAmelCase_ ) )
lowerCAmelCase__ : str = outputs.hidden_states
lowerCAmelCase__ : Tuple = 26
self.assertEqual(len(lowerCAmelCase_ ) ,lowerCAmelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
check_hidden_states_output(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
check_hidden_states_output(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : List[Any] = image_processor(images=lowerCAmelCase_ ,return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**lowerCAmelCase_ )
# verify the logits
lowerCAmelCase__ : Any = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape ,lowerCAmelCase_ )
lowerCAmelCase__ : Tuple = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase_ ,atol=1e-4 ) )
| 129 |
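The slow integration test above, reworked as a standalone inference script; this assumes a recent transformers release (for AutoImageProcessor), torch, Pillow, and access to the google/mobilenet_v1_1.0_224 checkpoint and the COCO fixture image:

import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])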
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected', [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Any ):
__lowerCAmelCase = _distribute_shards(**lowerCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected', [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
], )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = _split_gen_kwargs(lowerCAmelCase_, lowerCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected', [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
], )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : Any ):
if expected is RuntimeError:
with pytest.raises(lowerCAmelCase_ ):
_number_of_shards_in_gen_kwargs(lowerCAmelCase_ )
else:
__lowerCAmelCase = _number_of_shards_in_gen_kwargs(lowerCAmelCase_ )
assert out == expected
| 284 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class A__ ( _UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] = '''table-transformer'''
UpperCamelCase_ : Union[str, Any] = ['''past_key_values''']
UpperCamelCase_ : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Tuple , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Union[str, Any]=1_0_0 , lowerCAmelCase__ : Optional[Any]=6 , lowerCAmelCase__ : Any=2_0_4_8 , lowerCAmelCase__ : int=8 , lowerCAmelCase__ : List[str]=6 , lowerCAmelCase__ : Dict=2_0_4_8 , lowerCAmelCase__ : Dict=8 , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : List[Any]="relu" , lowerCAmelCase__ : Optional[Any]=2_5_6 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : int=1.0 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : int="resnet50" , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : List[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Union[str, Any]=1 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : List[str]=0.1 , **lowerCAmelCase__ : Any , ) -> Dict:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase : Dict = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = backbone_config.get("model_type" )
_UpperCAmelCase : Any = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : int = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = None, None, None
_UpperCAmelCase : Union[str, Any] = use_timm_backbone
_UpperCAmelCase : int = backbone_config
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : Any = num_queries
_UpperCAmelCase : int = d_model
_UpperCAmelCase : Dict = encoder_ffn_dim
_UpperCAmelCase : Tuple = encoder_layers
_UpperCAmelCase : Dict = encoder_attention_heads
_UpperCAmelCase : Union[str, Any] = decoder_ffn_dim
_UpperCAmelCase : Tuple = decoder_layers
_UpperCAmelCase : Union[str, Any] = decoder_attention_heads
_UpperCAmelCase : List[Any] = dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : Optional[Any] = activation_dropout
_UpperCAmelCase : Tuple = activation_function
_UpperCAmelCase : Optional[int] = init_std
_UpperCAmelCase : Optional[Any] = init_xavier_std
_UpperCAmelCase : Dict = encoder_layerdrop
_UpperCAmelCase : List[Any] = decoder_layerdrop
_UpperCAmelCase : Optional[int] = encoder_layers
_UpperCAmelCase : Optional[int] = auxiliary_loss
_UpperCAmelCase : Any = position_embedding_type
_UpperCAmelCase : str = backbone
_UpperCAmelCase : Dict = use_pretrained_backbone
_UpperCAmelCase : List[Any] = dilation
# Hungarian matcher
_UpperCAmelCase : Union[str, Any] = class_cost
_UpperCAmelCase : Optional[int] = bbox_cost
_UpperCAmelCase : Any = giou_cost
# Loss coefficients
_UpperCAmelCase : List[str] = mask_loss_coefficient
_UpperCAmelCase : int = dice_loss_coefficient
_UpperCAmelCase : List[str] = bbox_loss_coefficient
_UpperCAmelCase : Any = giou_loss_coefficient
_UpperCAmelCase : int = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
return self.d_model
class A__ ( _UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = version.parse('''1.11''' )
@property
def _lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1e-5
@property
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        return 1_2
| 145 |
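Because PretrainedConfig resolves names through attribute_map, the generic hidden_size and num_attention_heads read the DETR-style fields declared above. A quick check, assuming a transformers version that ships TableTransformerConfig (no network access needed):

from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
# attribute_map routes the generic names to the DETR-style ones:
assert config.hidden_size == config.d_model == 128
assert config.num_attention_heads == config.encoder_attention_heads == 4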
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
a_ = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
__lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase ( self : List[Any] , lowerCAmelCase_ : str ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
            # if the JUMAN dictionary is not installed on this system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__lowerCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 284 | 0 |
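The WordpieceTokenizer assertions above follow from greedy longest-match-first segmentation. A compact, self-contained sketch of that loop using the toy vocabulary from the tests (illustrative, not the transformers implementation):

def wordpiece(word: str, vocab: set[str], unk: str = "[UNK]", max_chars: int = 100) -> list[str]:
    # Greedy longest-match-first: repeatedly take the longest prefix found in
    # the vocab (continuation pieces carry a "##" prefix); fall back to [UNK].
    if len(word) > max_chars:
        return [unk]
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        piece = None
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]
        pieces.append(piece)
        start = end
    return pieces

vocab = {"こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"}
assert wordpiece("こんにちは", vocab) == ["こんにちは"]
assert wordpiece("こんばんは", vocab) == ["こん", "##ばんは"]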
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Any = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = max_length
UpperCAmelCase : List[str] = max_position_embeddings
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
UpperCAmelCase : Tuple = input_ids.shape[-1]
UpperCAmelCase : Optional[int] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model\'s predefined """
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
"""with `max_length = start_length + max_new_tokens` instead.""" , lowerCAmelCase_ , )
UpperCAmelCase : Dict = start_length
UpperCAmelCase : Tuple = max_new_tokens
UpperCAmelCase : Any = start_length + max_new_tokens
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str = max_time
UpperCAmelCase : List[Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@add_start_docstrings(lowerCAmelCase_ )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
return any(criteria(lowerCAmelCase_ , lowerCAmelCase_ ) for criteria in self )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for stopping_criterium in self:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return stopping_criterium.max_length
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return stopping_criterium.max_length
return None
def _snake_case ( UpperCamelCase : StoppingCriteriaList , UpperCamelCase : int ):
UpperCAmelCase : List[str] = stopping_criteria.max_length
UpperCAmelCase : str = deepcopy(lowerCAmelCase_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , lowerCAmelCase_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) )
return new_stopping_criteria
| 109 |
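To use criteria like these outside the library internals, generate() accepts a StoppingCriteriaList. A sketch with a hypothetical wall-clock criterion, assuming transformers is installed and the tiny sshleifer/tiny-gpt2 checkpoint is reachable:

import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList

class MaxWallClock(StoppingCriteria):
    # Hypothetical criterion for illustration: stop once a deadline has passed.
    def __init__(self, max_seconds: float):
        self.deadline = time.time() + max_seconds

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() > self.deadline  # True means "stop now"

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
ids = tok("Hello", return_tensors="pt").input_ids
out = model.generate(ids, max_length=50, stopping_criteria=StoppingCriteriaList([MaxWallClock(2.0)]))
print(tok.decode(out[0]))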
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """beit"""
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple=8_1_9_2 , lowerCAmelCase_ : Optional[int]=7_6_8 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : Any=3_0_7_2 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : int=1e-12 , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=[3, 5, 7, 1_1] , lowerCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=0.4 , lowerCAmelCase_ : Tuple=2_5_6 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[int]=2_5_5 , **lowerCAmelCase_ : Any , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_mask_token
__lowerCAmelCase = use_absolute_position_embeddings
__lowerCAmelCase = use_relative_position_bias
__lowerCAmelCase = use_shared_relative_position_bias
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase = out_indices
__lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase = use_auxiliary_head
__lowerCAmelCase = auxiliary_loss_weight
__lowerCAmelCase = auxiliary_channels
__lowerCAmelCase = auxiliary_num_convs
__lowerCAmelCase = auxiliary_concat_input
__lowerCAmelCase = semantic_loss_ignore_index
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
def lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : Optional[Any] ) -> float:
return 1e-4
| 284 | 0 |
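The OnnxConfig above only declares input names and dynamic axes; the same mapping can drive a plain torch.onnx.export call. A sketch under the assumption that a randomly initialized BeitModel with the default config is acceptable and that tuple outputs (return_dict=False) keep the tracer happy; whether arbitrary heights and widths work at runtime depends on the model:

import torch
from transformers import BeitConfig, BeitModel

model = BeitModel(BeitConfig()).eval()
model.config.return_dict = False  # export tuple outputs instead of a ModelOutput
dummy = torch.randn(1, 3, 224, 224)
torch.onnx.export(
    model,
    (dummy,),
    "beit.onnx",
    input_names=["pixel_values"],
    dynamic_axes={"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}},
    opset_version=12,
)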
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , ) -> List[Any]:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : Tuple = 13
UpperCAmelCase : int = 7
UpperCAmelCase : List[Any] = True
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Tuple = True
UpperCAmelCase : Optional[Any] = 99
UpperCAmelCase : Tuple = 32
UpperCAmelCase : Any = 2
UpperCAmelCase : List[Any] = 4
UpperCAmelCase : Union[str, Any] = 37
UpperCAmelCase : Dict = 'gelu'
UpperCAmelCase : List[str] = 0.1
UpperCAmelCase : Optional[int] = 0.1
UpperCAmelCase : Tuple = 512
UpperCAmelCase : List[str] = 16
UpperCAmelCase : int = 2
UpperCAmelCase : List[Any] = 0.02
UpperCAmelCase : Dict = 3
UpperCAmelCase : List[str] = 4
UpperCAmelCase : Optional[Any] = None
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Tuple = None
UpperCAmelCase : Dict = None
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Tuple = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase : Any = TFDistilBertModel(config=lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : List[str] = model(lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase : str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ) -> List[str]:
UpperCAmelCase : Tuple = TFDistilBertForMaskedLM(config=lowerCAmelCase_ )
UpperCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : int = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any ) -> Tuple:
UpperCAmelCase : Optional[int] = TFDistilBertForQuestionAnswering(config=lowerCAmelCase_ )
UpperCAmelCase : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
UpperCAmelCase : Any = model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Any , lowercase_ : int ) -> Dict:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : List[str] = TFDistilBertForSequenceClassification(lowerCAmelCase_ )
UpperCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : int ) -> Dict:
UpperCAmelCase : List[str] = self.num_choices
UpperCAmelCase : Tuple = TFDistilBertForMultipleChoice(lowerCAmelCase_ )
UpperCAmelCase : Dict = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : List[str] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : List[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
UpperCAmelCase : List[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : int = TFDistilBertForTokenClassification(lowerCAmelCase_ )
UpperCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : str = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : str = config_and_inputs
UpperCAmelCase : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCAmelCase_ : List[str] = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[Any] = False
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : int = TFDistilBertModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : int ) -> str:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase : List[str] = TFDistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : int = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
UpperCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase : int = model(lowerCAmelCase_ )[0]
UpperCAmelCase : int = [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase_ )
UpperCAmelCase : Any = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
| 151 |
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : int ):
return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 284 | 0 |
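The one-liner above builds n-grams by sliding a window of `ngram_size` over the input; because it only uses slicing and `len`, it works on strings and lists alike. A quick sketch with readable names:

def create_ngrams(sentence, ngram_size):
    # Slide a window of ngram_size over the sequence.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

print(create_ngrams("hello", 2))       # ['he', 'el', 'll', 'lo']
print(create_ngrams([1, 2, 3, 4], 3))  # [[1, 2, 3], [2, 3, 4]]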
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowercase : List[Any] = False
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
return 1_2
@property
def UpperCAmelCase__ (self ):
return 1_2
@property
def UpperCAmelCase__ (self ):
return 3_2
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(lowerCAmelCase_ )
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Dict = 1_2
lowerCamelCase_ : Tuple = 1_2
lowerCamelCase_ : int = {
'''attention_bias''': True,
'''cross_attention_dim''': 3_2,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 3_2,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
lowerCamelCase_ : Any = TransformeraDModel(**lowerCAmelCase_ )
return model
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''cpu'''
lowerCamelCase_ : Tuple = self.dummy_vqvae
lowerCamelCase_ : int = self.dummy_text_encoder
lowerCamelCase_ : Optional[int] = self.dummy_tokenizer
lowerCamelCase_ : str = self.dummy_transformer
lowerCamelCase_ : Optional[int] = VQDiffusionScheduler(self.num_embed )
lowerCamelCase_ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCAmelCase_ )
lowerCamelCase_ : Tuple = VQDiffusionPipeline(
vqvae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , transformer=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , learned_classifier_free_sampling_embeddings=lowerCAmelCase_ , )
lowerCamelCase_ : Optional[int] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : List[Any] = '''teddy bear playing in the pool'''
lowerCamelCase_ : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
lowerCamelCase_ : Dict = pipe([prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='''np''' )
lowerCamelCase_ : List[str] = output.images
lowerCamelCase_ : Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
lowerCamelCase_ : List[str] = pipe(
[prompt] , generator=lowerCAmelCase_ , output_type='''np''' , return_dict=lowerCAmelCase_ , num_inference_steps=2 )[0]
lowerCamelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCamelCase_ : Tuple = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = '''cpu'''
lowerCamelCase_ : List[str] = self.dummy_vqvae
lowerCamelCase_ : Optional[Any] = self.dummy_text_encoder
lowerCamelCase_ : int = self.dummy_tokenizer
lowerCamelCase_ : str = self.dummy_transformer
lowerCamelCase_ : str = VQDiffusionScheduler(self.num_embed )
lowerCamelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
lowerCamelCase_ : Optional[int] = VQDiffusionPipeline(
vqvae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , transformer=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , learned_classifier_free_sampling_embeddings=lowerCAmelCase_ , )
lowerCamelCase_ : Any = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCamelCase_ : Optional[int] = '''teddy bear playing in the pool'''
lowerCamelCase_ : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
lowerCamelCase_ : Dict = pipe([prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='''np''' )
lowerCamelCase_ : List[Any] = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
lowerCamelCase_ : str = pipe(
[prompt] , generator=lowerCAmelCase_ , output_type='''np''' , return_dict=lowerCAmelCase_ , num_inference_steps=2 )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
lowerCamelCase_ : Tuple = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
lowerCamelCase_ : List[str] = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
lowerCamelCase_ : Tuple = pipeline.to(lowerCAmelCase_ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
lowerCamelCase_ : Optional[Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
lowerCamelCase_ : Any = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=lowerCAmelCase_ , output_type='''np''' , )
lowerCamelCase_ : List[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 318 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """pegasus"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , lowerCAmelCase_ : Union[str, Any]=5_0_2_6_5 , lowerCAmelCase_ : Union[str, Any]=1_0_2_4 , lowerCAmelCase_ : Union[str, Any]=1_2 , lowerCAmelCase_ : Dict=4_0_9_6 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : List[Any]=1_2 , lowerCAmelCase_ : Union[str, Any]=4_0_9_6 , lowerCAmelCase_ : Union[str, Any]=1_6 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=1_0_2_4 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Union[str, Any]=0 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Tuple , ) -> List[str]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
@property
def lowercase ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def lowercase ( self : Optional[Any] ) -> int:
return self.d_model
| 284 | 0 |
def __UpperCamelCase ( lowerCAmelCase__ : str ):
__a : str = 0
for ch in input_str:
__a : List[Any] = ord(lowerCAmelCase_ )
__a : Dict = pow(2 , lowerCAmelCase_ )
        # If the bit for this character's code point is already on, we have seen a duplicate
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 |
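The function above decides whether a string's characters are all distinct by OR-ing one bit per code point into an arbitrary-precision integer and bailing out as soon as a bit is already set. A compact restatement with asserted examples (names are mine):

def has_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        bit = 1 << ord(ch)   # one bit per Unicode code point
        if bitmap & bit:     # bit already on: duplicate character
            return False
        bitmap |= bit
    return True

assert has_unique_chars("abcdef")
assert not has_unique_chars("hello")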
def a_ ( lowerCAmelCase_ : int ):
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__lowerCAmelCase = 4
__lowerCAmelCase = (1 << p) - 1
for _ in range(p - 2 ):
__lowerCAmelCase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 284 | 0 |
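The routine above is the Lucas-Lehmer primality test for Mersenne numbers: for an odd prime p, 2**p - 1 is prime exactly when iterating s <- s*s - 2 (mod 2**p - 1) from s = 4 for p - 2 steps ends at 0. A self-contained restatement with a small check:

def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

# 2**11 - 1 = 2047 = 23 * 89, so 11 is correctly rejected.
print([p for p in [3, 5, 7, 11, 13] if lucas_lehmer_test(p)])  # [3, 5, 7, 13]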
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = field(
metadata={"help": "The output directory where the model will be written."} , )
snake_case__ = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} , )
snake_case__ = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} , )
snake_case__ = field(
default=_UpperCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
snake_case__ = field(
default=_UpperCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def a_ ( ):
UpperCAmelCase__ = HfArgumentParser((ModelArguments,) )
((UpperCAmelCase__ ) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
    # Use the explicitly specified encoder config
if model_args.encoder_config_name:
UpperCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
UpperCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use the explicitly specified decoder config
if model_args.decoder_config_name:
UpperCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
UpperCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCAmelCase_ , decoder_config=lowerCAmelCase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
UpperCAmelCase__ = decoder_config.decoder_start_token_id
UpperCAmelCase__ = decoder_config.pad_token_id
if decoder_start_token_id is None:
UpperCAmelCase__ = decoder_config.bos_token_id
if pad_token_id is None:
UpperCAmelCase__ = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
UpperCAmelCase__ = decoder_config.eos_token_id
UpperCAmelCase__ = decoder_start_token_id
UpperCAmelCase__ = pad_token_id
UpperCAmelCase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 98 |
from __future__ import annotations
import math
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : bool, lowerCAmelCase_ : list[int], lowerCAmelCase_ : float ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(lowerCAmelCase_ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ), )
return min(
minimax(depth + 1, node_index * 2, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ), )
def a_ ( ):
__lowerCAmelCase = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__lowerCAmelCase = math.log(len(lowerCAmelCase_ ), 2 )
print('Optimal value : ', end='' )
print(minimax(0, 0, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 284 | 0 |
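The minimax above walks a complete binary game tree stored as a flat list of leaf scores, so the tree height is log2(len(scores)) and the children of node i sit at 2i and 2i + 1. A self-contained sketch on a classic example:

import math

def minimax(depth, node_index, is_max, scores, height):
    # Leaves live at depth == height; internal nodes alternate max/min.
    if depth == height:
        return scores[node_index]
    left, right = node_index * 2, node_index * 2 + 1
    pick = max if is_max else min
    return pick(
        minimax(depth + 1, left, not is_max, scores, height),
        minimax(depth + 1, right, not is_max, scores, height),
    )

scores = [3, 5, 2, 9, 12, 5, 23, 23]  # 8 leaves -> height 3
print(minimax(0, 0, True, scores, int(math.log2(len(scores)))))  # 12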
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Any:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase__ : Union[str, Any] = F"""Expected string as input, found {type(lowerCAmelCase_ )}"""
raise ValueError(lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase__ : int = F"""Expected boolean as use_pascal parameter, found {type(lowerCAmelCase_ )}"""
raise ValueError(lowerCAmelCase_ )
UpperCAmelCase__ : str = input_str.split('''_''' )
UpperCAmelCase__ : Any = 0 if use_pascal else 1
UpperCAmelCase__ : int = words[start_index:]
UpperCAmelCase__ : List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase__ : List[str] = '''''' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 181 |
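The converter above splits on underscores and capitalises every word after the first (or every word, when PascalCase is requested). A sketch with plain names showing the two modes side by side:

def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start = 0 if use_pascal else 1  # PascalCase capitalises the first word too
    capitalized = [word[0].upper() + word[1:] for word in words[start:]]
    head = "" if use_pascal else words[0]
    return "".join([head, *capitalized])

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString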
def a_ ( lowerCAmelCase_ : int ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
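The bit trick above relies on powers of two having exactly one set bit, so n & (n - 1) clears it and leaves 0. Note that the snippet also returns True for 0, which has no set bits at all; a quick check:

def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0

print([n for n in range(17) if is_power_of_two(n)])  # [0, 1, 2, 4, 8, 16]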
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCAmelCase = pytest.mark.integration
lowerCAmelCase = {'comet'}
lowerCAmelCase = importlib.util.find_spec('fairseq') is not None
lowerCAmelCase = {'code_eval'}
lowerCAmelCase = os.name == 'nt'
lowerCAmelCase = {'bertscore', 'frugalscore', 'perplexity'}
lowerCAmelCase = importlib.util.find_spec('transformers') is not None
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@wraps(lowerCAmelCase_ )
def wrapper(self , SCREAMING_SNAKE_CASE ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@wraps(lowerCAmelCase_ )
def wrapper(self , SCREAMING_SNAKE_CASE ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@wraps(lowerCAmelCase_ )
def wrapper(self , SCREAMING_SNAKE_CASE ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self , lowerCAmelCase_ )
return wrapper
def _a ( ):
"""simple docstring"""
lowercase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@local
class _a ( parameterized.TestCase ):
_lowercase : int = {}
_lowercase : Any = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = '''[...]'''
lowercase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCAmelCase_ ) ).module_path )
lowercase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase_ )
# check parameters
lowercase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCAmelCase_ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowercase__ = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ = '''[...]'''
lowercase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , lowerCAmelCase_ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowercase__ = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ) -> int:
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase_ ):
yield
else:
yield
@contextmanager
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
def load_local_metric(UpperCamelCase_: Any , *UpperCamelCase_: str , **UpperCamelCase_: List[str] ):
return load_metric(os.path.join('''metrics''' , lowerCAmelCase_ ) , *lowerCAmelCase_ , **lowerCAmelCase_ )
with patch('''datasets.load_metric''' ) as mock_load_metric:
lowercase__ = load_local_metric
yield
@classmethod
def lowerCamelCase_ ( cls: List[str] , UpperCamelCase_: List[Any] ) -> List[str]:
"""simple docstring"""
def wrapper(UpperCamelCase_: str ):
lowercase__ = contextmanager(lowerCAmelCase_ )
lowercase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
class _a ( _UpperCamelCase ):
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Dict:
"""simple docstring"""
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
lowercase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase_ ) )
    # mock get_model, which would otherwise download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
lowercase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def load_from_checkpoint(SCREAMING_SNAKE_CASE ):
class _a :
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any , *UpperCamelCase_: Any , **UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
assert len(lowerCAmelCase_ ) == 2
lowercase__ = [0.19, 0.92]
return scores, sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
return Model()
    # mock comet.download_model and comet.load_from_checkpoint, which would otherwise download a model
with patch('''comet.download_model''' ) as mock_download_model:
lowercase__ = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
lowercase__ = load_from_checkpoint
yield
def _a ( ):
"""simple docstring"""
lowercase__ = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
lowercase__ = '''ERROR'''
lowercase__ = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(lowerCAmelCase_ , match=re.escape(lowerCAmelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowerCAmelCase_ )
| 110 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 284 | 0 |
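The module above repeats one guard pattern: probe for an optional backend, raise a sentinel exception if it is missing, and fall back to dummy placeholder objects so that imports never hard-fail. The pattern in isolation (the `DDIMScheduler` placeholder below is illustrative, not the library's dummy object):

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    """Sentinel raised when an optional backend is missing."""

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to a dummy so downstream imports still resolve.
    class DDIMScheduler:
        def __init__(self, *args, **kwargs):
            raise ImportError("DDIMScheduler requires the PyTorch backend.")
else:
    pass  # the real `from .scheduling_ddim import DDIMScheduler` would go here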
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __lowerCamelCase ( snake_case__ ) -> List[str]:
"""simple docstring"""
if "model" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""model.""" ,"""""" )
if "norm1" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm1""" ,"""attention.output.LayerNorm""" )
if "norm2" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm2""" ,"""output.LayerNorm""" )
if "norm" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""norm""" ,"""LayerNorm""" )
if "transformer" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.split(""".""" )[0].split("""_""" )[-1]
_SCREAMING_SNAKE_CASE = orig_key.replace(F'transformer_{layer_num}' ,F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mha.attn""" ,"""attention.self""" )
if "mha" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mha""" ,"""attention""" )
if "W_q" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_q""" ,"""self.query""" )
if "W_k" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_k""" ,"""self.key""" )
if "W_v" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""W_v""" ,"""self.value""" )
if "ff1" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff1""" ,"""intermediate.dense""" )
if "ff2" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff2""" ,"""output.dense""" )
if "ff" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""ff""" ,"""output.dense""" )
if "mlm_class" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mlm.mlm_class""" ,"""cls.predictions.decoder""" )
if "mlm" in orig_key:
_SCREAMING_SNAKE_CASE = orig_key.replace("""mlm""" ,"""cls.predictions.transform""" )
if "cls" not in orig_key:
_SCREAMING_SNAKE_CASE = """yoso.""" + orig_key
return orig_key
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(lowerCAmelCase_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_SCREAMING_SNAKE_CASE = val
_SCREAMING_SNAKE_CASE = orig_state_dict["""cls.predictions.decoder.bias"""]
_SCREAMING_SNAKE_CASE = torch.arange(lowerCAmelCase_ ).expand((1, -1) ) + 2
return orig_state_dict
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch.load(lowerCAmelCase_ ,map_location="""cpu""" )["""model_state_dict"""]
_SCREAMING_SNAKE_CASE = YosoConfig.from_json_file(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = YosoForMaskedLM(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = convert_checkpoint_helper(config.max_position_embeddings ,lowerCAmelCase_ )
print(model.load_state_dict(lowerCAmelCase_ ) )
model.eval()
model.save_pretrained(lowerCAmelCase_ )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 306 |
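The converter above is a cascade of substring rewrites over checkpoint keys, applied in a deliberate order (for example `mha.attn` before the generic `mha`), followed by `load_state_dict`. A simplified sketch of how such a cascade transforms one key (the key itself and the reduced rule set are assumptions for illustration):

RULES = [
    ("model.", ""),
    ("mha.attn", "attention.self"),
    ("W_q", "self.query"),
    ("mha", "attention"),
]

def rename_key(key: str) -> str:
    # Apply the rewrites in order; earlier, more specific rules win.
    for old, new in RULES:
        key = key.replace(old, new)
    return key

print(rename_key("model.mha.W_q.weight"))  # attention.self.query.weight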
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Dict, lowerCAmelCase_ : Tuple=1024, lowerCAmelCase_ : Optional[Any]=1024, lowerCAmelCase_ : Tuple=False, **lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='train', **lowerCAmelCase_ )
__lowerCAmelCase = tok.pad_token_id
def get_lens(lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = tqdm(
DataLoader(lowerCAmelCase_, batch_size=512, num_workers=8, shuffle=lowerCAmelCase_, collate_fn=ds.collate_fn ), desc=str(ds.len_file ), )
__lowerCAmelCase = []
for batch in dl:
__lowerCAmelCase = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
__lowerCAmelCase = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_, lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_, lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
__lowerCAmelCase = SeqaSeqDataset(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, type_path='val', **lowerCAmelCase_ )
__lowerCAmelCase = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_, train_ds.len_file )
pickle_save(lowerCAmelCase_, val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 284 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_2
lowercase_ = 4_2
class snake_case__:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = [[] for _ in range(lowerCAmelCase_ )]
lowercase__ : List[Any] = size
def __getitem__( self : Any , SCREAMING_SNAKE_CASE : int ):
return iter(self._graph[vertex] )
@property
def snake_case ( self : int ):
return self._size
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : int = deque([start_vertex] )
lowercase__ : Tuple = [None] * self.size
lowercase__ : Union[str, Any] = 0
while queue:
lowercase__ : List[str] = queue.popleft()
lowercase__ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ : List[str] = current_distance + edge.weight
lowercase__ : Any = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ : int = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 130 |
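The class above implements 0-1 BFS: because every edge weighs 0 or 1, a deque replaces Dijkstra's priority queue, with 0-weight edges pushed to the front and 1-weight edges to the back. A standalone sketch of the same idea over a plain adjacency list:

from collections import deque

def zero_one_bfs(adj, start, finish):
    # adj[u] is a list of (destination, weight) pairs with weight in {0, 1}.
    distances = [None] * len(adj)
    distances[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, weight in adj[u]:
            new_distance = distances[u] + weight
            if distances[v] is None or new_distance < distances[v]:
                distances[v] = new_distance
                # Front-pushing 0-weight edges keeps the deque ordered by distance.
                (queue.appendleft if weight == 0 else queue.append)(v)
    if distances[finish] is None:
        raise ValueError("No path from start_vertex to finish_vertex.")
    return distances[finish]

adj = [[(1, 0), (2, 1)], [(3, 1)], [(4, 0)], [(4, 1)], []]
print(zero_one_bfs(adj, 0, 4))  # 1, via 0 -> 2 -> 4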
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : List[Any]=None ):
if attention_mask is None:
__lowerCAmelCase = tf.cast(tf.math.not_equal(lowerCAmelCase_, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCAmelCase :
"""simple docstring"""
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = embed_dim
__lowerCAmelCase = word_embed_proj_dim
__lowerCAmelCase = False
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
__lowerCAmelCase = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
__lowerCAmelCase = TFOPTModel(config=lowerCAmelCase_ )
__lowerCAmelCase = inputs_dict['input_ids']
__lowerCAmelCase = input_ids[:1, :]
__lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
__lowerCAmelCase = 1
# first forward pass
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFOPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ):
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
                # Build the word embedding weights if they don't exist yet,
                # then retry fetching the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
__lowerCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
__lowerCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return tf.constant(lowerCAmelCase_, dtype=tf.intaa )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm(self):
        model_id = 'facebook/opt-350m'
        expected_outputs = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors='tf').input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 284 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve for whichever of resistance, reactance, or impedance is given as 0,
    using the relation impedance**2 = resistance**2 + reactance**2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {'resistance': sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {'reactance': sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {'impedance': sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
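

# Worked examples (values chosen for illustration): a 3-4-5 triangle of
# resistance and reactance.
#
#   electrical_impedance(3, 4, 0)   # -> {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)   # -> {'resistance': 3.0}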
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # a module is a leaf if it has no submodules of its own
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # detach the hooks once the forward pass has populated `traced`
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
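

# A minimal usage sketch (the toy module and input below are illustrative, not
# part of the conversion flow): `Tracker` records, via forward hooks, the leaf
# layers hit by a single forward pass, and `parametrized` keeps those with weights.
#
#   toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   leaves = Tracker(toy)(torch.randn(1, 3, 32, 32)).parametrized
#   # -> [Conv2d(3, 8, ...), BatchNorm2d(8, ...)] in call order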
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # trace the parametrized leaf modules of both networks with the same input
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}')
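

# Sketch of the transfer step under the same illustrative setup: given two
# structurally identical networks, the traced leaves are zipped together and
# the source weights are copied into the destination in order.
#
#   src_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   dst_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   ModuleTransfer(src=src_net, dest=dst_net)(torch.randn(1, 3, 32, 32))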
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True,
        )
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
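

# Programmatic use without the CLI (the dump path is hypothetical): convert a
# single checkpoint locally and skip the hub upload.
#
#   convert_weights_and_push(Path('/tmp/resnet_dump'), model_name='resnet50', push_to_hub=False)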
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 1 |