"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __lowerCAmelCase ( ) -> int:
"""simple docstring"""
snake_case : List[Any] = Github(os.environ["GITHUB_TOKEN"] )
snake_case : Union[str, Any] = g.get_repo("huggingface/transformers" )
snake_case : Dict = repo.get_issues(state="open" )
for issue in open_issues:
snake_case : Optional[int] = sorted([comment for comment in issue.get_comments()] , key=lambda lowercase : i.created_at , reverse=lowercase )
snake_case : List[str] = comments[0] if len(lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
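# Intended to run on a schedule (e.g. a daily CI cron job); it needs a GITHUB_TOKEN
# with permission to comment on and close issues in the target repository.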
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case = logging.get_logger(__name__)
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : Dict = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case : Optional[int] = json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case : Optional[int] = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case : Any = json.loads(lowercase )
if not mpi_options.get("sagemaker_mpi_enabled" , lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
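# For illustration, a model-parallel SageMaker job typically exports something like
# SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}' (values are hypothetical);
# the check above only requires that the "partitions" field is present.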
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with jieba and then tokenizes with SentencePiece."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
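# A minimal usage sketch (the model file path is hypothetical; requires a local
# SentencePiece model and the jieba package):
#
#     tokenizer = CpmTokenizer("spiece.model")
#     ids = tokenizer.encode("你好,世界")
#     print(tokenizer.decode(ids))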
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque of values, so collisions chain instead of overwriting.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
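# A minimal usage sketch (assumes the sibling hash_table module's HashTable takes a
# table size plus charge factor and exposes insert_data(); names are illustrative):
#
#     table = HashTableWithLinkedList(size_table=4, charge_factor=2)
#     for value in (10, 20, 30, 40):
#         table.insert_data(value)
#     print(table.balanced_factor())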
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) using a single dynamic-programming row."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
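# The in-place row update implements Pascal's rule C(n, r) = C(n - 1, r - 1) + C(n - 1, r);
# iterating j from high to low means each entry still sees the previous row's values,
# so only O(r) memory is needed. For example, binomial_coefficient(n=10, r=5) == 252.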
print(binomial_coefficient(n=10, r=5))
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
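# With the sets above, the intersection {"c", "d", "e"} has 3 elements and the union
# has 8, so the printed similarity is 3 / 8 = 0.375.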
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A valid numeric value in the distribution's support, used when padding data series."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping inputs to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant with the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
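# A minimal usage sketch (shapes are illustrative):
#
#     output = StudentTOutput(dim=1)
#     projection = output.get_parameter_projection(in_features=32)
#     df, loc, scale = projection(torch.randn(8, 32))
#     samples = output.distribution((df, loc, scale)).sample()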
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)

    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""

            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a per-pixel depth map for an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
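# A minimal usage sketch (the checkpoint name is illustrative; any depth-estimation
# model works):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor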
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
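# Typical invocation through the accelerate CLI, which registers env_command_parser
# as a subcommand (the config path is illustrative):
#
#     accelerate env --config_file path/to/config.yaml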
"""Convert GLPN checkpoints to the Hugging Face format."""

import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


# We will verify our results on an image from the COCO validation set.
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
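# Example invocation (script name and paths are illustrative):
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti \
#         --model_name glpn-kitti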
class Matrix:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 cells surrounding the current cell (depth-first search).
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
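# A small worked example (8-connectivity merges diagonal neighbours):
#
#     graph = [
#         [1, 1, 0, 0, 0],
#         [0, 1, 0, 0, 1],
#         [1, 0, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#     ]
#     print(Matrix(5, 5, graph).count_islands())  # -> 5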
import os


def solution():
    """Finds the maximum total of a top-to-bottom path through the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
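# Row i is computed from row i - 1 by adding the larger of the two parents above
# each cell, so after the loop the best top-to-bottom path sum is the maximum of
# the bottom row.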
if __name__ == "__main__":
    print(solution())
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
@dataclass
class __lowerCamelCase :
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
class __lowerCamelCase ( lowerCamelCase__ ):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class __lowerCamelCase :
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def A__ (lowerCamelCase ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    pad_token_label_id=-100,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
):
    """
    Loads a list of examples into a list of `InputFeatures`.

    `cls_token_at_end` defines the location of the CLS token:
        - False (default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet).
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in enumerate(examples):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        label_ids = []
        for word, label in zip(example.words, example.labels):
            word_tokens = tokenizer.tokenize(word)

            # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
            if len(word_tokens) > 0:
                tokens.extend(word_tokens)
                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = tokenizer.num_special_tokens_to_add()
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

        if "token_type_ids" not in tokenizer.model_input_names:
            segment_ids = None

        features.append(
            InputFeatures(
                input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
            )
        )
    return features
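# --- Illustrative sketch (not part of the original file) ---
# The function above labels only the first sub-token of each word and pads the
# continuation pieces with `pad_token_label_id` (-100, which CrossEntropyLoss
# ignores). The helper below is a hypothetical, dependency-free demonstration
# of that alignment rule; `fake_subtokens` stands in for a real tokenizer.
def _demo_first_subtoken_labels(words, labels, label_map, pad_token_label_id=-100):
    fake_subtokens = {"jacksonville": ["jack", "##son", "##ville"]}  # toy "tokenizer"
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = fake_subtokens.get(word, [word])
        tokens.extend(pieces)
        # real label on the first piece, ignored label on the continuation pieces
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))
    return tokens, label_ids


assert _demo_first_subtoken_labels(["jacksonville"], ["B-LOC"], {"B-LOC": 0}) == (
    ["jack", "##son", "##ville"],
    [0, -100, -100],
)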
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """
        This will be superseded by a framework-agnostic approach soon.
        """

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """
        This will be superseded by a framework-agnostic approach soon.
        """

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i] | 358 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 6, 11])
    [-4, 0, 1, 5, 6, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
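# --- Illustrative note (not part of the original file) ---
# The backward pass makes this faster than bubble sort on "turtles": small
# values sitting at the far right. Bubble sort moves the 1 below left by only
# one slot per full pass; the first backward sweep here carries it all the way.
assert cocktail_shaker_sort([2, 3, 4, 5, 1]) == [1, 2, 3, 4, 5]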
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }") | 317 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 251 | import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
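# --- Illustrative sketch (not part of the original file) ---
# read_in_q_k_v above slices timm's fused qkv projection of shape
# (3 * hidden_size, hidden_size) into three (hidden_size, hidden_size) blocks.
# A minimal, self-contained demonstration of that slicing (helper name is
# hypothetical):
def _demo_split_fused_qkv(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    return query, key, value


_w = torch.arange(6 * 2, dtype=torch.float32).reshape(6, 2)  # hidden_size = 2
_q, _k, _v = _demo_split_fused_qkv(_w, 2)
assert _q.shape == _k.shape == _v.shape == (2, 2)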
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure.
    """

    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 118 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Runs greedy longest-match-first WordPiece tokenization on a single token."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
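# --- Illustrative example (not part of the original file) ---
# The tokenizer above is greedy longest-match-first: at each position it tries
# the longest substring still in the vocabulary before shrinking the window.
# Toy vocabulary and inputs below are hypothetical, for demonstration only.
_demo_vocab = {"我": 0, "我们": 1, "们": 2, "喜欢": 3}
_demo_wp = WordpieceTokenizer(vocab=_demo_vocab, unk_token="<unk>")
assert _demo_wp.tokenize("我们喜欢") == ["我们", "喜欢"]  # longest match wins over "我"
assert _demo_wp.tokenize("我们爱") == ["我们", "<unk>"]  # unknown char falls back to <unk>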
class CpmAntTokenizer(PreTrainedTokenizer):
    """
    Construct a CPM-Ant tokenizer.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative and special ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 144 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
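# --- Illustrative shape check (not part of the original file) ---
# With the default block_out_channels (16, 32, 96, 256), the loop above applies
# three stride-2 convolutions, so the conditioning image is downsampled by 8;
# that is why init_weights below builds controlnet_cond at sample_size * 8.
# Minimal sketch (the class name, channel count 8, and input size 64 are
# arbitrary choices for the demonstration):
class _DemoDown8(nn.Module):
    @nn.compact
    def __call__(self, x):
        for _ in range(3):
            x = nn.Conv(8, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))(x)
        return x


_y, _ = _DemoDown8().init_with_output(jax.random.PRNGKey(0), jnp.zeros((1, 64, 64, 3)))
assert _y.shape == (1, 8, 8, 8)  # 64 / 2 / 2 / 2 = 8 in each spatial dimension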
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
| 144 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False
class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
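# --- Illustrative usage (not part of the original file) ---
# Minimal sketch of streaming with `generate`; the model name is arbitrary and
# downloading it is only for demonstration, so the snippet is left commented:
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextStreamer(tok, skip_prompt=True)
#     model.generate(**inputs, streamer=streamer, max_new_tokens=20)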
| 25 |
def count_inversions_bf(arr):
    """
    Counts the number of inversions using a naive brute-force algorithm, O(n^2).
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """
    Counts the number of inversions using a divide-and-conquer algorithm, O(n log n).
    Returns the sorted array alongside the inversion count.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """
    Counts the inversions across two sorted arrays while merging them.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
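# --- Illustrative walk-through (not part of the original file) ---
# Why `num_inversion += len(p) - i` above is correct: p and q arrive sorted, so
# if p[i] > q[j], every later element of p also exceeds q[j], giving
# len(p) - i inversions at once. A small check against the brute-force count:
_p, _q = [2, 5, 9], [1, 6]
_merged, _cross = _count_cross_inversions(_p, _q)
assert _merged == [1, 2, 5, 6, 9]
assert _cross == sum(1 for a in _p for b in _q if a > b) == 4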
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this array has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 226 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
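    # --- Illustrative note (not part of the original file) ---
    # The test above relies on left padding: a decoder-only model continues
    # from the last position, so pad tokens must sit on the left or the model
    # would be asked to generate after padding. Hypothetical setup sketch:
    #
    #     tokenizer.padding_side = "left"
    #     batch = tokenizer(sentences, return_tensors="tf", padding=True)
    #     out = model.generate(**batch, max_new_tokens=12)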
| 354 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure FeaturesManager.determine_framework returns the expected framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 0 | 0 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]

    print('Our RoBERTa config:' , config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
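# --- Illustrative sketch (not part of the original file) ---
# The same output-parity check as above, factored into a reusable helper
# (the helper name is hypothetical, not part of the transformers API):
def _outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)


assert _outputs_match(torch.ones(2, 2), torch.ones(2, 2) + 1e-4)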
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 86 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
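# Pretraining-only parameter names (quantizer and projection heads), kept as a
# reference list for the conversion below.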
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
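# Walk the dotted `key` path on the HF model via getattr, verify the shape, and copy
# the fairseq tensor into the matching weight/bias slot.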
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
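# Iterate over the fairseq state dict, route conv-layer weights to the feature
# extractor, and map everything else through MAPPING onto the HF model.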
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
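# Entry point: rebuild the fairseq WavLM model, transfer its weights into a fresh
# HF WavLMModel, and save the result.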
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : int = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
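# Configuration for SEW-D; the defaults below appear to correspond to the
# asapp/sew-d-tiny-100k checkpoint listed above.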
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "sew-d"
def __init__( self : Any , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : List[Any]=7_6_8 , UpperCAmelCase_ : int=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=5_1_2 , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Dict=("p2c", "c2p") , UpperCAmelCase_ : List[Any]="layer_norm" , UpperCAmelCase_ : List[str]="gelu_python" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Dict=1e-7 , UpperCAmelCase_ : List[Any]=1e-5 , UpperCAmelCase_ : Union[str, Any]="group" , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase_ : Union[str, Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[int]=1_2_8 , UpperCAmelCase_ : Tuple=1_6 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=0.05 , UpperCAmelCase_ : Optional[int]=1_0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : Any="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Any=2_5_6 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Dict=2 , **UpperCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : int = hidden_size
a : List[str] = feat_extract_norm
a : str = feat_extract_activation
a : Optional[int] = list(UpperCAmelCase_)
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : Dict = conv_bias
a : Optional[Any] = num_conv_pos_embeddings
a : Any = num_conv_pos_embedding_groups
a : int = len(self.conv_dim)
a : int = num_hidden_layers
a : Dict = intermediate_size
a : int = squeeze_factor
a : Optional[Any] = max_position_embeddings
a : Optional[int] = position_buckets
a : Optional[Any] = share_att_key
a : List[str] = relative_attention
a : Tuple = norm_rel_ebd
a : Union[str, Any] = list(UpperCAmelCase_)
a : int = hidden_act
a : List[Any] = num_attention_heads
a : str = hidden_dropout
a : List[str] = attention_dropout
a : Union[str, Any] = activation_dropout
a : List[str] = feat_proj_dropout
a : List[str] = final_dropout
a : Any = layer_norm_eps
a : Tuple = feature_layer_norm_eps
a : str = initializer_range
a : Optional[int] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : Optional[Any] = apply_spec_augment
a : List[str] = mask_time_prob
a : Tuple = mask_time_length
a : Union[str, Any] = mask_time_min_masks
a : Dict = mask_feature_prob
a : Union[str, Any] = mask_feature_length
a : Optional[Any] = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : str = ctc_zero_infinity
# sequence classification
a : str = use_weighted_layer_sum
a : Optional[int] = classifier_proj_size
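# Total stride of the convolutional feature encoder (product of all conv strides).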
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
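# Image preprocessing follows the usual pipeline: resize the shortest edge,
# center-crop, rescale to [0, 1], then normalize with ImageNet statistics.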
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
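# Turn raw logits into per-image semantic segmentation maps, optionally resizing
# each map to the requested target size via bilinear interpolation.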
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 345 | 1 |
def __lowerCamelCase ( UpperCAmelCase_ : str ):
"""simple docstring"""
assert column_title.isupper()
a :Tuple = 0
a :Any = len(UpperCAmelCase_ ) - 1
a :Tuple = 0
while index >= 0:
a :str = (ord(column_title[index] ) - 64) * pow(26 , UpperCAmelCase_ )
answer += value
power += 1
index -= 1
return answer
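# Example: for "AB" the loop computes 2 * 26**0 + 1 * 26**1 = 28, the Excel
# column number of "AB".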
if __name__ == "__main__":
from doctest import testmod
testmod()
| 94 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
snake_case : int = '''Create a default config file for Accelerate with only a few flags set.'''
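# Detect the available accelerator in order CUDA -> XPU -> NPU -> CPU and write a
# minimal Accelerate cluster config to `save_location`.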
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ):
"""simple docstring"""
a :List[str] = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
a :Optional[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
a :List[Any] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
a :Dict = torch.cuda.device_count()
a :Tuple = num_gpus
a :int = False
if num_gpus > 1:
a :str = '''MULTI_GPU'''
else:
a :List[Any] = '''NO'''
elif is_xpu_available() and use_xpu:
a :List[Any] = torch.xpu.device_count()
a :Optional[int] = num_xpus
a :List[Any] = False
if num_xpus > 1:
a :int = '''MULTI_XPU'''
else:
a :str = '''NO'''
elif is_npu_available():
a :List[str] = torch.npu.device_count()
a :Any = num_npus
a :Optional[int] = False
if num_npus > 1:
a :List[str] = '''MULTI_NPU'''
else:
a :Dict = '''NO'''
else:
a :str = 0
a :Optional[Any] = True
a :Optional[Any] = 1
a :str = '''NO'''
a :List[str] = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
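# Wire the `accelerate config default` subcommand: its two flags feed straight into
# write_basic_config above.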
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
a :List[Any] = parser.add_parser('''default''' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'''--config_file''' , default=UpperCAmelCase_ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=UpperCAmelCase_ , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
a :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
| 94 | 1 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
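# Read the original YAML config and flatten its nested sections into dotted
# attribute names on an argparse.Namespace (e.g. model.classification.name).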
def UpperCamelCase( UpperCAmelCase_ ):
print('Loading config file...' )
def flatten_yaml_as_dict(UpperCAmelCase_ , UpperCAmelCase_="" , UpperCAmelCase_="." ):
UpperCAmelCase : Union[str, Any] = []
for k, v in d.items():
UpperCAmelCase : Dict = parent_key + sep + k if parent_key else k
if isinstance(UpperCAmelCase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCAmelCase_ , UpperCAmelCase_ , sep=UpperCAmelCase_ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCAmelCase_ )
UpperCAmelCase : int = argparse.Namespace()
with open(UpperCAmelCase_ , 'r' ) as yaml_file:
try:
UpperCAmelCase : int = yaml.load(UpperCAmelCase_ , Loader=yaml.FullLoader )
UpperCAmelCase : List[Any] = flatten_yaml_as_dict(UpperCAmelCase_ )
for k, v in flat_cfg.items():
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(UpperCAmelCase_ , str(UpperCAmelCase_ ) ) )
return config
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = MobileViTVaConfig()
UpperCAmelCase : str = False
# dataset
if task_name.startswith('imagenet1k_' ):
UpperCAmelCase : Optional[int] = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
UpperCAmelCase : Union[str, Any] = 3_84
else:
UpperCAmelCase : Optional[Any] = 2_56
UpperCAmelCase : List[Any] = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
UpperCAmelCase : Optional[int] = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
UpperCAmelCase : List[Any] = 3_84
else:
UpperCAmelCase : Optional[int] = 2_56
UpperCAmelCase : str = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
UpperCAmelCase : List[str] = 1_51
UpperCAmelCase : Union[str, Any] = 5_12
UpperCAmelCase : Optional[int] = 'ade20k-id2label.json'
UpperCAmelCase : Tuple = True
elif task_name.startswith('voc_' ):
UpperCAmelCase : Union[str, Any] = 21
UpperCAmelCase : int = 5_12
UpperCAmelCase : List[str] = 'pascal-voc-id2label.json'
UpperCAmelCase : Any = True
# orig_config
UpperCAmelCase : Optional[Any] = load_orig_config_file(UpperCAmelCase_ )
assert getattr(UpperCAmelCase_ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : Any = getattr(UpperCAmelCase_ , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(UpperCAmelCase_ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : int = getattr(UpperCAmelCase_ , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : str = getattr(UpperCAmelCase_ , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase : List[Any] = getattr(UpperCAmelCase_ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
UpperCAmelCase : int = getattr(UpperCAmelCase_ , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 )
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase_ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
UpperCAmelCase : Optional[Any] = 'huggingface/label-files'
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
UpperCAmelCase : Optional[Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
return config
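# Pop the tensor stored under the old key and re-insert it under its HF name.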
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = dct.pop(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = val
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False ):
if base_model:
UpperCAmelCase : Optional[Any] = ''
else:
UpperCAmelCase : List[str] = 'mobilevitv2.'
UpperCAmelCase : str = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : Union[str, Any] = k[8:]
else:
UpperCAmelCase : List[Any] = k
if ".block." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('.block.' , '.' )
if ".conv." in k:
UpperCAmelCase : Any = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('conv_1.' , F"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if F"""layer_{i}.""" in k:
UpperCAmelCase : Tuple = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
UpperCAmelCase : str = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F"""layer_{i}.0.""" in k:
UpperCAmelCase : Any = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if F"""layer_{i}.1.local_rep.0.""" in k:
UpperCAmelCase : Dict = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if F"""layer_{i}.1.local_rep.1.""" in k:
UpperCAmelCase : List[Any] = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : int = [0, 1]
elif i == 4:
UpperCAmelCase : Optional[Any] = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : Union[str, Any] = [0, 1, 2]
for j in j_in:
if F"""layer_{i}.1.global_rep.{j}.""" in k:
UpperCAmelCase : Optional[int] = k_new.replace(
F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(
F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if F"""layer_{i}.1.conv_proj.""" in k:
UpperCAmelCase : Tuple = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
UpperCAmelCase : int = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
UpperCAmelCase : Union[str, Any] = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : Any = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : Optional[Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : int = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
UpperCAmelCase : Optional[int] = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
UpperCAmelCase : List[str] = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
UpperCAmelCase : Optional[int] = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
UpperCAmelCase : int = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
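# Drop the auxiliary segmentation head, which has no counterpart in the HF model.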
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(UpperCAmelCase_ )
for k in keys_to_ignore:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
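# Full conversion: load the original checkpoint, rename/prune its keys, load them
# into the HF model, then sanity-check the logits on a COCO test image.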
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = get_mobilevitva_config(UpperCAmelCase_ , UpperCAmelCase_ )
# load original state_dict
UpperCAmelCase : str = torch.load(UpperCAmelCase_ , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
UpperCAmelCase : Any = MobileViTVaForSemanticSegmentation(UpperCAmelCase_ ).eval()
UpperCAmelCase : Any = False
else:
UpperCAmelCase : str = MobileViTVaForImageClassification(UpperCAmelCase_ ).eval()
UpperCAmelCase : Tuple = False
# remove and rename some keys of load the original model
UpperCAmelCase : str = checkpoint
remove_unused_keys(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = create_rename_keys(UpperCAmelCase_ , base_model=UpperCAmelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# load modified state_dict
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : List[str] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase : int = image_processor(images=prepare_img() , return_tensors='pt' )
UpperCAmelCase : int = model(**UpperCAmelCase_ )
# verify classification model
if task_name.startswith('imagenet' ):
UpperCAmelCase : Dict = outputs.logits
UpperCAmelCase : Union[str, Any] = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1E-4 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
lowercase__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 365 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
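# Perceiver configuration; the defaults below appear to mirror the
# deepmind/language-perceiver checkpoint referenced above.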
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """perceiver"""
def __init__( self : str , lowercase_ : List[str]=256 , lowercase_ : List[str]=1_280 , lowercase_ : str=768 , lowercase_ : Tuple=1 , lowercase_ : str=26 , lowercase_ : List[Any]=8 , lowercase_ : int=8 , lowercase_ : List[str]=None , lowercase_ : Dict=None , lowercase_ : int="kv" , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=1 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=262 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Optional[int]=56 , lowercase_ : int=[368, 496] , lowercase_ : str=16 , lowercase_ : Optional[int]=1_920 , lowercase_ : Tuple=16 , lowercase_ : int=[1, 16, 224, 224] , **lowercase_ : Union[str, Any] , ) -> List[str]:
super().__init__(**lowercase_ )
UpperCAmelCase : Union[str, Any] = num_latents
UpperCAmelCase : List[Any] = d_latents
UpperCAmelCase : Dict = d_model
UpperCAmelCase : Dict = num_blocks
UpperCAmelCase : Optional[int] = num_self_attends_per_block
UpperCAmelCase : Optional[Any] = num_self_attention_heads
UpperCAmelCase : Optional[Any] = num_cross_attention_heads
UpperCAmelCase : Tuple = qk_channels
UpperCAmelCase : Optional[int] = v_channels
UpperCAmelCase : str = cross_attention_shape_for_attention
UpperCAmelCase : Union[str, Any] = self_attention_widening_factor
UpperCAmelCase : List[Any] = cross_attention_widening_factor
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Any = use_query_residual
# masked language modeling attributes
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : List[Any] = max_position_embeddings
# image classification attributes
UpperCAmelCase : str = image_size
# flow attributes
UpperCAmelCase : Any = train_size
# multimodal autoencoding attributes
UpperCAmelCase : Any = num_frames
UpperCAmelCase : List[Any] = audio_samples_per_frame
UpperCAmelCase : Tuple = samples_per_patch
UpperCAmelCase : Union[str, Any] = output_shape
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self : str ) -> float:
return 1E-4
def UpperCAmelCase_ ( self : str , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowercase_ , lowercase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : int = preprocessor.num_special_tokens_to_add(lowercase_ )
UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : List[Any] = [' '.join(['a'] ) * seq_length] * batch_size
UpperCAmelCase : Union[str, Any] = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Union[str, Any] = inputs.pop('input_ids' )
return inputs
elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase : Any = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Dict = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 280 | 0 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A__ ( UpperCamelCase ):
return (data["data"], data["target"])
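# Fit an XGBoost regressor on the training split and return its test predictions
# as a column vector.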
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase , UpperCamelCase )
# Predict target for test data
A = xgb.predict(UpperCamelCase )
A = predictions.reshape(len(UpperCamelCase ) , 1 )
return predictions
def A__ ( ):
A = fetch_california_housing()
A, A = data_handling(UpperCamelCase )
A, A, A, A = train_test_split(
UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
A = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Error printing
print(F"Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}" )
print(F"Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 292 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCamelCase :List[Any] , **__UpperCamelCase :List[Any] ):
pass
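# Fingerprint a PIL image by hashing its raw bytes, so pipeline outputs can be
# compared across runs.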
def A__ ( UpperCamelCase ):
A = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[int] ):
A = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[Any] ):
A = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , __UpperCamelCase )
import datasets
A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
A = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , __UpperCamelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase ( self :Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
A = "Intel/dpt-large"
A = pipeline("depth-estimation" , model=__UpperCamelCase )
A = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
A = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase ( self :Optional[Any] ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 292 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
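# The test below shards prompts, images, masks and RNG keys across all local
# devices (via replicate/shard) and runs the jitted Flax inpainting pipeline.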
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowercase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowercase : Union[str, Any] = """xvjiarui/stable-diffusion-2-inpainting"""
__lowercase , __lowercase : List[str] = FlaxStableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
__lowercase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowercase : List[str] = jax.random.PRNGKey(0 )
__lowercase : List[Any] = 50
__lowercase : str = jax.device_count()
__lowercase : List[str] = num_samples * [prompt]
__lowercase : int = num_samples * [init_image]
__lowercase : Union[str, Any] = num_samples * [mask_image]
__lowercase , __lowercase , __lowercase : Union[str, Any] = pipeline.prepare_inputs(__a , __a , __a )
# shard inputs and rng
__lowercase : Tuple = replicate(__a )
__lowercase : List[str] = jax.random.split(__a , jax.device_count() )
__lowercase : List[str] = shard(__a )
__lowercase : int = shard(__a )
__lowercase : List[Any] = shard(__a )
__lowercase : Optional[int] = pipeline(
__a , __a , __a , __a , __a , __a , jit=__a )
__lowercase : List[str] = output.images.reshape(__a , 512 , 512 , 3 )
__lowercase : List[Any] = images[0, 253:256, 253:256, -1]
__lowercase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Optional[Any] = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 | 306 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
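# Same resize/crop/rescale/normalize pipeline as above, plus an optional RGB
# conversion step and the OpenAI CLIP normalization statistics.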
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = ['''pixel_values''']
def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Dict = size if size is not None else {"""shortest_edge""": 224}
__lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a )
__lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" )
__lowercase : Optional[int] = do_resize
__lowercase : Union[str, Any] = size
__lowercase : List[Any] = resample
__lowercase : Any = do_center_crop
__lowercase : Dict = crop_size
__lowercase : int = do_rescale
__lowercase : Tuple = rescale_factor
__lowercase : List[Any] = do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase : Union[str, Any] = do_convert_rgb
def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Dict = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Tuple = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]:
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Dict = size if size is not None else self.size
__lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
__lowercase : int = resample if resample is not None else self.resample
__lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
__lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
__lowercase : str = image_std if image_std is not None else self.image_std
__lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
__lowercase : Any = [to_numpy_array(__a ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
__lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
__lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
__lowercase : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=__a , tensor_type=__a ) | 306 | 1 |
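# Minimal doubly linked list: Node holds data plus previous/next pointers, and the
# list tracks explicit head and tail references.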
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        # Detach the node by rewiring its neighbours around it.
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
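# Usage sketch for the reconstructed list above (method names follow the
# restored definitions in this file, not an external API):
if __name__ == "__main__":
    linked_list = LinkedList()
    for item in (1, 2, 3):
        linked_list.insert(item)           # 1 2 3
    linked_list.insert_at_position(2, 9)   # 1 9 2 3
    linked_list.delete_value(9)            # back to 1 2 3
    print(linked_list)                     # expected output: 1 2 3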
| 59 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
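# The round-trip exercised above, outside a test harness (a sketch; it needs
# both PyTorch and TensorFlow installed and downloads the checkpoint):
#
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)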
| 16 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 109 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # Pairwise squared distances: ||a||^2 - 2 a.b + ||b||^2, computed batched.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # Map every RGB pixel to the index of its nearest cluster centre.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Maps pixel values to [-1, 1]: rescale to [0, 2], then shift down by 1.
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
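# Standalone sketch of the nearest-cluster quantization above. The 16-entry
# palette here is a random stand-in; real ImageGPT checkpoints ship their own
# cluster table.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_clusters = rng.uniform(-1, 1, size=(16, 3))
    demo_image = rng.uniform(-1, 1, size=(4, 4, 3))
    ids = color_quantize(demo_image, demo_clusters)  # one palette id per pixel
    print(ids.reshape(4, 4))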
| 109 | 1 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used throughout the module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
'''simple docstring'''
def __init__( self : Tuple ,A : Any ,A : Dict=-1 ,A : Optional[Any]=None ):
__A = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__A = self.hash_filename_if_too_long(_A ,_A )
# The path to the lock file.
__A = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__A = None
# The default timeout value.
__A = timeout
# We use this lock primarily for the lock counter.
__A = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__A = 0
return None
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return self._lock_file
@property
def UpperCamelCase_ ( self : str ):
return self._timeout
@timeout.setter
def UpperCamelCase_ ( self : str ,A : Dict ):
__A = float(_A )
return None
def UpperCamelCase_ ( self : Optional[int] ):
raise NotImplementedError()
def UpperCamelCase_ ( self : Tuple ):
raise NotImplementedError()
@property
def UpperCamelCase_ ( self : List[Any] ):
return self._lock_file_fd is not None
def UpperCamelCase_ ( self : str ,A : List[Any]=None ,A : Optional[Any]=0.05 ):
if timeout is None:
__A = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__A = id(self )
__A = self._lock_file
__A = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(_A )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__A = max(0 ,self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase_ ( self : int ,A : Any=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__A = id(self )
__A = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__A = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : Union[str, Any] ):
self.acquire()
return self
def __exit__( self : Optional[int] ,A : int ,A : str ,A : Any ):
self.release()
return None
def __del__( self : Any ):
self.release(force=_A )
return None
def UpperCamelCase_ ( self : List[str] ,A : str ,A : int ):
__A = os.path.basename(_A )
if len(_A ) > max_length and max_length > 0:
__A = os.path.dirname(_A )
__A = str(hash(_A ) )
__A = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(_A ,_A )
else:
return path
class WindowsFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self : Tuple ,A : Optional[int] ,A : List[str]=-1 ,A : Dict=None ):
from .file_utils import relative_to_absolute_path
super().__init__(_A ,timeout=_A ,max_filename_length=_A )
__A = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def UpperCamelCase_ ( self : Tuple ):
__A = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__A = os.open(self._lock_file ,_A )
except OSError:
pass
else:
try:
msvcrt.locking(_A ,msvcrt.LK_NBLCK ,1 )
except OSError:
os.close(_A )
else:
__A = fd
return None
def UpperCamelCase_ ( self : int ):
__A = self._lock_file_fd
__A = None
msvcrt.locking(_A ,msvcrt.LK_UNLCK ,1 )
os.close(_A )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self : str ,A : Any ,A : Any=-1 ,A : List[str]=None ):
__A = os.statvfs(os.path.dirname(_A ) ).f_namemax
super().__init__(_A ,timeout=_A ,max_filename_length=_A )
def UpperCamelCase_ ( self : int ):
__A = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__A = os.open(self._lock_file ,_A )
try:
fcntl.flock(_A ,fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_A )
else:
__A = fd
return None
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self._lock_file_fd
__A = None
fcntl.flock(_A ,fcntl.LOCK_UN )
os.close(_A )
return None
class SoftFileLock(BaseFileLock):
'''simple docstring'''
def UpperCamelCase_ ( self : int ):
__A = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__A = os.open(self._lock_file ,_A )
except OSError:
pass
else:
__A = fd
return None
def UpperCamelCase_ ( self : Union[str, Any] ):
os.close(self._lock_file_fd )
__A = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
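# Typical use of this module (a sketch; `FileLock` is whichever platform class
# was selected above, and the lock-file path is illustrative):
if __name__ == "__main__":
    lock = FileLock("demo.txt.lock", timeout=5)
    with lock:  # raises Timeout if the lock is not acquired within 5 seconds
        with open("demo.txt", "a") as f:
            f.write("exclusive write\n")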
| 15 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the `max_n`-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 | 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    True when digit ``n`` can be placed at ``grid[row][column]``: it must not
    already appear in the row, the column or the surrounding 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds the first vacant cell (marked with 0), searching row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Solves the grid in place by backtracking and returns it, or None when the
    grid has no solution.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
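# Quick checks on the helper above, using `initial_grid`: row 0 already
# contains a 3, so another 3 is unsafe there, while a 1 is still allowed.
assert is_safe(initial_grid, 0, 1, 3) is False
assert is_safe(initial_grid, 0, 1, 1) is True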
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 367 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCAmelCase__ , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=lowerCAmelCase__ , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCAmelCase__ , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=lowerCAmelCase__ , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCAmelCase__ , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=lowerCAmelCase__ , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=lowerCAmelCase__ , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=lowerCAmelCase__ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=lowerCAmelCase__ , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=lowerCAmelCase__ , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=lowerCAmelCase__ , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=lowerCAmelCase__ , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
__UpperCAmelCase : Optional[Any] = {}
if args.model_type is None:
__UpperCAmelCase : str = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
__UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
__UpperCAmelCase : Dict = args.n_docs
if args.index_name is not None:
__UpperCAmelCase : Union[str, Any] = args.index_name
if args.index_path is not None:
__UpperCAmelCase : Dict = args.index_path
else:
__UpperCAmelCase : str = BartForConditionalGeneration
__UpperCAmelCase : str = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
__UpperCAmelCase : Any = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(lowerCAmelCase__ ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
__UpperCAmelCase : Optional[int] = RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__UpperCAmelCase : Any = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ )
model.retriever.init_retrieval()
else:
__UpperCAmelCase : Tuple = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
__UpperCAmelCase : Union[str, Any] = []
for line in tqdm(lowerCAmelCase__ ):
questions.append(line.strip() )
if len(lowerCAmelCase__ ) == args.eval_batch_size:
__UpperCAmelCase : Any = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
preds_file.write("""\n""".join(lowerCAmelCase__ ) + """\n""" )
preds_file.flush()
__UpperCAmelCase : List[str] = []
if len(lowerCAmelCase__ ) > 0:
__UpperCAmelCase : Optional[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
preds_file.write("""\n""".join(lowerCAmelCase__ ) )
preds_file.flush()
score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
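# Example invocation (paths and checkpoint name are placeholders; the flags
# are the ones defined in get_args above):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --n_docs 5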
| 16 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
def __call__( self : List[Any] , __lowercase : List[List[int]] , __lowercase : Optional[torch.Generator] = None , __lowercase : int = 100 , __lowercase : bool = True , __lowercase : str = "numpy" , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowercase , __lowercase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__lowercase )}.''' )
__lowercase =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__lowercase =np.zeros([1, 0, self.n_dims] , np.floataa )
__lowercase =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device )
for i, encoder_input_tokens in enumerate(__lowercase ):
if i == 0:
__lowercase =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__lowercase =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__lowercase =ones
__lowercase =self.scale_features(
__lowercase , output_range=[-1.0, 1.0] , clip=__lowercase )
__lowercase =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__lowercase =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowercase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__lowercase =self.decode(
encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__lowercase =self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase ).prev_sample
__lowercase =self.scale_to_features(__lowercase , input_range=[-1.0, 1.0] )
__lowercase =mel[:1]
__lowercase =mel.cpu().float().numpy()
__lowercase =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowercase , __lowercase )
logger.info('Generated segment' , __lowercase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__lowercase =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__lowercase =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowercase )
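# Worked example of the scaling used above: with min_value = log(1e-5)
# ≈ -11.5129 and max_value = 4.0, the midpoint feature
# (min_value + max_value) / 2 ≈ -3.7565 maps to 0.0 in the [-1, 1] output
# range, and scale_to_features maps 0.0 back to the same midpoint (clip=False).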
| 141 |
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 2 | 0 |
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in doubling "blocks"; compute how many blocks
        # are needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(F"ValueError: there is no {number}th Proth number")
            continue
        print(F"The {number}th Proth number: {value}")
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 96 | 1 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" ) | 145 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'
                )
            )
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs('transformers', level='WARNING') as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                    ' is called from.'
                )
            )
| 284 | 0 |
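
# Minimal end-to-end use of the tokenizer exercised by the tests above (this assumes
# network access plus the MeCab backend and the cl-tohoku checkpoint are available):
from transformers import BertJapaneseTokenizer

tok = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tok.tokenize("こんにちは、世界。"))  # MeCab word segmentation, then WordPiece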
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Merge all the files with optimum cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 |
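
# Worked example of the greedy rule above (assuming the restored function name):
# merging [2, 3, 4] first combines 2+3 at cost 5, then 5+4 at cost 9, for a total
# cost of 14. Note the helper mutates its argument, hence the copy.
files = [2, 3, 4]
print(optimal_merge_pattern(files.copy()))  # 14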
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 163 | 1 |
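
# Standalone sanity check for the `random_subsample` helper above (assuming the
# restored signature): a 2-second clip at 16 kHz is cut to exactly one second,
# while clips shorter than `max_length` pass through unchanged.
import numpy as np

clip = np.zeros(32_000)
assert random_subsample(clip, max_length=1).shape == (16_000,)
assert random_subsample(np.zeros(8_000), max_length=1).shape == (8_000,)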
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'),
            up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 205 |
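
# The pixel-slice assertions in the tests above compare raw image bytes. The same
# pattern on a synthetic image (Pillow assumed available, since the pipelines
# return PIL images):
import numpy as np
from PIL import Image

img = Image.fromarray(np.arange(64, dtype=np.uint8).reshape(8, 8))
print(np.frombuffer(img.tobytes(), dtype="uint8")[:10])  # [0 1 2 3 4 5 6 7 8 9]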
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evaluates a small batch,
        # but it should be enough to detect a regression in the output quality
        mname = F'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 68 | 0 |
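
# The `calculate_bleu` helper imported above lives in the examples' utils module and
# is roughly a thin wrapper over sacrebleu, along these lines (a sketch, assuming
# sacrebleu is installed):
from sacrebleu import corpus_bleu


def calculate_bleu(output_lns, refs_lns):
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}


print(calculate_bleu(["the cat sat"], ["the cat sat"]))  # {'bleu': 100.0}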
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias']

    new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias']

    new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight']
    new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias']
    new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias']

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]

        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight'
            )
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias'
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]

        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml'
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors'):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework='pt', device='cpu') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)['state_dict']

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 350 |
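
# Example invocation of the converter above (both paths are placeholders):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers
#
# or, equivalently, from Python:
#
#   vae_pt_to_vae_diffuser("./vae.ckpt", "./vae-diffusers")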
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>") | 240 | 0 |
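
# A slightly more defensive variant of the fetch above (a sketch; it reuses the
# `_NEWS_API` constant and surfaces HTTP errors instead of failing on bad JSON):
def fetch_bbc_news_titles(api_key: str) -> list:
    response = requests.get(_NEWS_API + api_key, timeout=10)
    response.raise_for_status()
    return [article["title"] for article in response.json().get("articles", [])]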
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 34 |
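
# A quick tour of the Polynomial class above (names as restored):
p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
q = Polynomial(1, [0, 2])  # 2x
print(p + q)  # 3x^2 + 2x + 1
print(p.evaluate(2))  # 3*4 + 1 = 13
print(p.derivative())  # 6x
print(p.integral())  # 1.0x^3 + 1.0x (integration constant defaults to 0)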
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35 | 0 |
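
# Wiring the custom pipeline above into transformers, following the upstream
# custom-pipeline tutorial (the MRPC checkpoint below is the one used in that
# tutorial; any sequence-classification model would work):
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("sgugger/finetuned-bert-mrpc")
tokenizer = AutoTokenizer.from_pretrained("sgugger/finetuned-bert-mrpc")
pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
print(pipe("I love you", second_text="I hate you"))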
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the collected information succinct and easy to read.

    Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` and
    use class names like `BertForMaskedLM` instead.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
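# A minimal usage sketch for the helpers above, kept as comments so the module
# stays import-safe. The test file path is hypothetical and assumes a
# transformers checkout that actually contains such a module:
#
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_test_to_tester_mapping(test_file)))
#   print(to_json(get_model_to_test_mapping(test_file)))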
| 200 |
def mf_knapsack(i, wt, val, j):
    """
    This code involves the concept of memory functions. Here we solve the
    subproblems which are needed, unlike the bottom-up example below.
    f is a 2D array filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            new_val = mf_knapsack(i - 1, wt, val, j)
        else:
            new_val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = new_val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer weights knapsack problem and returns one of
    the several possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """
    Recursively reconstructs one of the optimal subsets given a filled DP table
    and the vector of weights.
    """
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j),
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
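# Worked example of the recurrence above (added for illustration): with
# wt = [4, 3, 2, 3], val = [3, 2, 4, 4] and capacity 6, the final entry is
# dp[4][6] = max(val[3] + dp[3][6 - wt[3]], dp[3][6]) = max(4 + 4, 7) = 8,
# i.e. items 3 and 4 (weights 2 and 3, values 4 and 4) form an optimal subset.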
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 200 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
# load base model
A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
A__ = load_file(lowercase_ )
A__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
A__ = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
A__ = pipeline.text_encoder
else:
A__ = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
A__ = pipeline.unet
# find the target layer
A__ = layer_infos.pop(0 )
while len(lowercase_ ) > -1:
try:
A__ = curr_layer.__getattr__(lowercase_ )
if len(lowercase_ ) > 0:
A__ = layer_infos.pop(0 )
elif len(lowercase_ ) == 0:
break
except Exception:
if len(lowercase_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
A__ = layer_infos.pop(0 )
A__ = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up" ) )
pair_keys.append(lowercase_ )
else:
pair_keys.append(lowercase_ )
pair_keys.append(key.replace("lora_up" , "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 )
else:
A__ = state_dict[pair_keys[0]].to(torch.floataa )
A__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ )
# update visited list
for item in pair_keys:
visited.append(lowercase_ )
return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
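# Hedged usage sketch (the script name and paths are illustrative):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora/my_style.safetensors \
#       --dump_path ./merged-pipeline --alpha 0.75
#
# The merge applied above is W' = W + alpha * (up @ down) per LoRA pair, so
# alpha controls how strongly the adapter shifts the base weights.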
| 247 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
SCREAMING_SNAKE_CASE = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
default=A_, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(A_ )} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
lowercase__ = field(
default=1_28, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=1_28, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, )
lowercase__ = field(
default=64, metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
}, )
lowercase__ = field(
default=30, metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
lowercase__ = field(
default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=20, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
lowercase__ = field(
default=0, metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
}, )
lowercase__ = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( A_ ):
lowercase__ = '''train'''
lowercase__ = '''dev'''
class UpperCAmelCase_ ( A_ ):
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
def __init__( self : List[Any] , snake_case_ : SquadDataTrainingArguments , snake_case_ : PreTrainedTokenizer , snake_case_ : Optional[int] = None , snake_case_ : Union[str, Split] = Split.train , snake_case_ : Optional[bool] = False , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = "pt" , ) -> Union[str, Any]:
'''simple docstring'''
A__ = args
A__ = is_language_sensitive
A__ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(snake_case_ , snake_case_ ):
try:
A__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
A__ = mode
# Load data features from cache or dataset file
A__ = "v2" if args.version_2_with_negative else "v1"
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + ".lock"
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(snake_case_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A__ = self.old_features["features"]
A__ = self.old_features.get("dataset" , snake_case_ )
A__ = self.old_features.get("examples" , snake_case_ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir )
else:
A__ = self.processor.get_train_examples(args.data_dir )
A__, A__ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=snake_case_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=snake_case_ , )
A__ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , snake_case_ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , snake_case_ : Any ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A__ = self.features[i]
A__ = torch.tensor(feature.input_ids , dtype=torch.long )
A__ = torch.tensor(feature.attention_mask , dtype=torch.long )
A__ = torch.tensor(feature.token_type_ids , dtype=torch.long )
A__ = torch.tensor(feature.cls_index , dtype=torch.long )
A__ = torch.tensor(feature.p_mask , dtype=torch.float )
A__ = torch.tensor(feature.is_impossible , dtype=torch.float )
A__ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
A__ = torch.tensor(feature.start_position , dtype=torch.long )
A__ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
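# Hedged usage sketch (argument values are hypothetical):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(data_args, tokenizer, mode="train")
#   batch = train_dataset[0]  # dict with input_ids / attention_mask / positions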
| 247 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 368 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """
        simple constructor that receives a key or uses
        default key = 0
        """
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """
        input: 'content' of type string and 'key' of type int
        output: encrypted string 'content' as a list of chars
        if key not passed the method uses the key by the constructor.
        otherwise key = 1
        """
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """
        input: encrypted 'content' of type list and 'key' of type int
        output: decrypted string 'content' as a list of chars
        if key not passed the method uses the key by the constructor.
        otherwise key = 1
        """
        # precondition
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
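# Note (added for illustration): XOR encryption is symmetric, since
# (m ^ k) ^ k == m for any byte m and key k, which is why encrypt_string and
# decrypt_string above share the same body. For example, ord("h") ^ 67 == 43
# and 43 ^ 67 == 104 == ord("h").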
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 84 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "bert-generation"
def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Any = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :int = num_attention_heads
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :int = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = max_position_embeddings
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Union[str, Any] = layer_norm_eps
lowerCAmelCase_ :List[str] = position_embedding_type
lowerCAmelCase_ :Optional[int] = use_cache
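# Hedged usage sketch: instantiating the config with its defaults, then
# overriding one field. BertGenerationEncoder is the matching transformers
# model class and is assumed to be available here.
#
#   from transformers import BertGenerationConfig, BertGenerationEncoder
#
#   config = BertGenerationConfig(hidden_dropout_prob=0.2)
#   model = BertGenerationEncoder(config)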
| 84 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library), based on byte-level
    Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
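# Hedged usage sketch. The token ids shown are the ones quoted in the GPT-2
# documentation and illustrate that byte-level BPE is whitespace-sensitive:
#
#   from transformers import GPT2TokenizerFast
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]   # [15496, 995]
#   tokenizer(" Hello world")["input_ids"]  # [18435, 995]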
| 369 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
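# Worked example (standard Vigenere behaviour, shown here for illustration):
# with key "LEMON", the first letter of "ATTACK" encrypts as
# (A + L) mod 26 = 0 + 11 = 11 -> "L", and the full message becomes "LXFOPV".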
| 146 | 0 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> str:
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ : Any = fast.next.next
SCREAMING_SNAKE_CASE__ : Union[str, Any] = slow.next
SCREAMING_SNAKE_CASE__ : int = slow.next
SCREAMING_SNAKE_CASE__ : int = None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
while second:
SCREAMING_SNAKE_CASE__ : Optional[int] = second.next
SCREAMING_SNAKE_CASE__ : List[Any] = node
SCREAMING_SNAKE_CASE__ : Any = second
SCREAMING_SNAKE_CASE__ : Optional[int] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : str = head.next
return True
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE__ : Optional[Any] = head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE__ : Any = [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE__ : List[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE__ : List[str] = cur.next
return True
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [pos]
SCREAMING_SNAKE_CASE__ : str = head.next
pos += 1
SCREAMING_SNAKE_CASE__ : Dict = pos - 1
SCREAMING_SNAKE_CASE__ : str = 0
for v in d.values():
if len(__lowerCAmelCase ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for i in range(0 , len(__lowerCAmelCase ) ):
if v[i] + v[len(__lowerCAmelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
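# The three checks above assume a singly linked node with `val` and `next`
# attributes; the original file does not define one, so this minimal sketch is
# an assumption added for illustration:
#
#   class ListNode:
#       def __init__(self, val):
#           self.val = val
#           self.next = None
#
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(1)
#   assert is_palindrome_stack(head)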
| 132 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # TODO: Fix the failed tests when this model gets more usage
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from AudioSet
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 132 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero shot object detection pipeline: predicts bounding boxes of objects given an image and a set of
    `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
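# Hedged usage sketch (the image URL is the usual COCO example; the model id is
# a real OWL-ViT checkpoint assumed to be available):
#
#   from transformers import pipeline
#
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]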
| 111 |
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
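# Hedged usage sketch (the tokenizer choice and jsonl path are hypothetical):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(),
#                          get_mmimdb_labels(), max_seq_length=300)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)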
| 111 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
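# A minimal sketch of the standard from_pretrained API on this config class
# (checkpoint name taken from the archive map above):
#   text_config = PixaStructTextConfig.from_pretrained("google/pix2struct-textcaps-base")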
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # keep the sub-configs in sync with the composite config
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
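# A minimal sketch composing the two sub-configs with default values:
#   config = PixaStructConfig.from_text_vision_configs(PixaStructTextConfig(), PixaStructVisionConfig())
#   assert config.to_dict()["model_type"] == "pix2struct"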
| 186 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
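# NOTE: torch.utils.cpp_extension.load JIT-compiles the kernels on first call, so a
# working C++/CUDA toolchain must be available at runtime; PyTorch caches the built
# extension and reuses it on subsequent calls.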
| 310 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
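# Illustrative usage (hypothetical checkpoint path and model object):
#   state_dict = load_checkpoint("frcnn_checkpoint.pkl")
#   model.load_state_dict(state_dict)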
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
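# NOTE: the ETag returned by the HEAD request is hashed into the cache file name (see
# url_to_filename below), so a changed remote file produces a new cache entry rather
# than silently reusing a stale download.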
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
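# Illustrative usage (hypothetical URL):
#   local_file = cached_path("https://example.com/config.yaml", cache_dir="/tmp/demo_cache")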
def get_data(query, delim=","):
    """simple docstring"""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    """simple docstring"""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
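# Illustrative usage: iterate a list lazily in fixed-size batches:
#   for batch_imgs in chunk(image_paths, batch=4):
#       ...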
| 310 | 1 |
"""simple docstring"""
_a = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 194 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
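# Parameter meanings (standard Gabor kernel): ksize = kernel side length, sigma = width
# of the Gaussian envelope, theta = filter orientation in degrees, lambd = wavelength of
# the sinusoidal factor, gamma = spatial aspect ratio, psi = phase offset.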
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 194 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
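# A minimal usage sketch through the public datasets API (hypothetical local file):
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=";")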
| 123 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : Optional[Any] ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : Union[str, Any] ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__lowerCAmelCase : str ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)
    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
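# A minimal sketch of running the metric end to end (mirrors the docstring example above):
#   wiki_split = datasets.load_metric("wiki_split")
#   scores = wiki_split.compute(sources=sources, predictions=predictions, references=references)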
| 123 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
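# NOTE: update() and query() each run in O(log n): lazy values are pushed down only
# when a node is actually visited, which is what makes range assignment efficient.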
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 108 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])
    result = mst(adjancency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
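# The assertion accepts either orientation because the MST of an undirected graph may
# report an edge as (u, v) or (v, u).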
| 19 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
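# NOTE: the id2label maps are fetched from the "huggingface/label-files" dataset repo
# referenced above; per the branches in get_deta_config, DETA checkpoints use 91 COCO
# classes or 366 Objects365 classes.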
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ):
UpperCamelCase : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = val
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
UpperCamelCase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase : Tuple = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCamelCase : str = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase : List[Any] = in_proj_weight[:dim, :]
UpperCamelCase : int = in_proj_bias[: dim]
UpperCamelCase : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase : str = in_proj_bias[
dim : dim * 2
]
UpperCamelCase : str = in_proj_weight[
-dim :, :
]
UpperCamelCase : Any = in_proj_bias[-dim :]
# fmt: on
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
UpperCamelCase : Optional[int] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase : Tuple = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase : str = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase : List[Any] = in_proj_weight[:hidden_size, :]
UpperCamelCase : Dict = in_proj_bias[:hidden_size]
UpperCamelCase : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
UpperCamelCase : str = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase : int = in_proj_weight[-hidden_size:, :]
UpperCamelCase : List[str] = in_proj_bias[-hidden_size:]
def a ( ):
UpperCamelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ):
UpperCamelCase : List[str] = get_deta_config(SCREAMING_SNAKE_CASE_ )
# load original state dict
if model_name == "deta-swin-large":
UpperCamelCase : Optional[int] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
UpperCamelCase : Union[str, Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
UpperCamelCase : List[str] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE_ , param.shape )
# rename keys
UpperCamelCase : Dict = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
UpperCamelCase : Any = state_dict.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = val
if "input_proj" in key:
UpperCamelCase : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
UpperCamelCase : str = state_dict.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = val
# finally, create HuggingFace model and load state dict
UpperCamelCase : List[Any] = DetaForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(SCREAMING_SNAKE_CASE_ )
# load image processor
UpperCamelCase : Optional[int] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
UpperCamelCase : List[Any] = encoding['''pixel_values''']
UpperCamelCase : Optional[int] = model(pixel_values.to(SCREAMING_SNAKE_CASE_ ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
UpperCamelCase : Optional[int] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
UpperCamelCase : Tuple = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
UpperCamelCase : Optional[Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
UpperCamelCase : List[Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(F"""jozhang97/{model_name}""" )
processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCAmelCase : int = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 355 |
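A minimal, self-contained sketch of the two state-dict surgeries the conversion script above relies on: renaming checkpoint keys from (src, dest) pairs, and slicing a fused qkv projection into separate query/key/value blocks. Every key name and shape below is invented for illustration.

import torch

dim = 4
state_dict = {
    "transformer.encoder.layers.0.linear1.weight": torch.randn(8, dim),
    "backbone.attn.qkv.weight": torch.randn(3 * dim, dim),  # fused q, k, v rows
}

# (1) rename: pop the tensor under the old key, reinsert it under the new one
for src, dest in [("transformer.encoder.layers.0.linear1.weight", "model.encoder.layers.0.fc1.weight")]:
    state_dict[dest] = state_dict.pop(src)

# (2) split: rows [0, dim) are the query, [dim, 2*dim) the key, the rest the value
in_proj_weight = state_dict.pop("backbone.attn.qkv.weight")
state_dict["attn.query.weight"] = in_proj_weight[:dim, :]
state_dict["attn.key.weight"] = in_proj_weight[dim : dim * 2, :]
state_dict["attn.value.weight"] = in_proj_weight[-dim:, :]
print(sorted(state_dict))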
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = CLIPConfig
__UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase : Dict = []
UpperCamelCase : List[str] = image_embeds.shape[0]
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Optional[int] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
UpperCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Union[str, Any] = 0.0
UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase : int = special_care * 0.01
UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 315 | 0 |
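The cosine-similarity helper at the top of the snippet above is a normalize-then-matmul: once the rows of both inputs are L2-normalized, a single matrix product yields every pairwise cosine similarity. A quick check with made-up shapes:

import torch
import torch.nn as nn

image_embeds = torch.randn(2, 8)    # (num_images, dim)
concept_embeds = torch.randn(3, 8)  # (num_concepts, dim)
sims = torch.mm(nn.functional.normalize(image_embeds), nn.functional.normalize(concept_embeds).t())
print(sims.shape)                              # torch.Size([2, 3])
print(bool((sims.abs() <= 1.0 + 1e-6).all()))  # cosine similarities lie in [-1, 1]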
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = []
for i in range(len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_SCREAMING_SNAKE_CASE = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__lowerCamelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__lowerCamelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__lowerCamelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_SCREAMING_SNAKE_CASE = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__lowerCamelCase )
return next_generation
def lowerCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int ) ->list[Image.Image]:
_SCREAMING_SNAKE_CASE = []
for _ in range(__lowerCamelCase ):
# Create output image
_SCREAMING_SNAKE_CASE = Image.new("""RGB""" , (len(cells[0] ), len(__lowerCamelCase )) )
_SCREAMING_SNAKE_CASE = img.load()
# Save cells to image
for x in range(len(__lowerCamelCase ) ):
for y in range(len(cells[0] ) ):
_SCREAMING_SNAKE_CASE = 255 - cells[y][x] * 255
_SCREAMING_SNAKE_CASE = (colour, colour, colour)
# Save image
images.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = new_generation(__lowerCamelCase )
return images
if __name__ == "__main__":
lowercase_ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 58 |
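Assuming the first function above is importable as new_generation (the name used at its own call site inside the image loop), the 3x3 blinker defined above oscillates with period two:

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
once = new_generation(blinker)
assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]  # vertical bar turns horizontal
assert new_generation(once) == blinker            # and back again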
'''simple docstring'''
from collections.abc import Sequence
def lowerCamelCase ( __lowerCamelCase : Sequence[float] , __lowerCamelCase : bool = False ) ->float:
if not arr:
return 0
_SCREAMING_SNAKE_CASE = 0 if allow_empty_subarrays else float("""-inf""" )
_SCREAMING_SNAKE_CASE = 0.0
for num in arr:
_SCREAMING_SNAKE_CASE = max(0 if allow_empty_subarrays else num , curr_sum + num )
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , __lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 58 | 1 |
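A worked trace of the scan above on the list from its __main__ block (the function name is taken from the print statement there), with empty subarrays disallowed:

# num:       -2   1  -3   4  -1   2   1  -5   4
# curr_sum:  -2   1  -2   4   3   5   6   1   5
# max_sum peaks at 6, the sum of the subarray [4, -1, 2, 1]
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6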
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
__UpperCAmelCase = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
UpperCAmelCase_ : List[str] = bs[:]
UpperCAmelCase_ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__snake_case )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ : Union[str, Any] = [chr(__snake_case ) for n in cs]
return dict(zip(__snake_case , __snake_case ) )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = set()
UpperCAmelCase_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : int = char
return pairs
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Any = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="replace" , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase=False , **_UpperCamelCase , ) -> Optional[Any]:
UpperCAmelCase_ : str = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token
UpperCAmelCase_ : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
UpperCAmelCase_ : Dict = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token
UpperCAmelCase_ : Optional[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token
UpperCAmelCase_ : Union[str, Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ : Optional[int] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
super().__init__(
errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase_ : List[str] = json.load(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : str = errors # how to handle errors in decoding
UpperCAmelCase_ : Optional[Any] = bytes_to_unicode()
UpperCAmelCase_ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase , encoding='utf-8' ) as merges_handle:
UpperCAmelCase_ : Optional[Any] = merges_handle.read().split('\n' )[1:-1]
UpperCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : int = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __UpperCAmelCase ( self ) -> Tuple:
return len(self.encoder )
def __UpperCAmelCase ( self ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Dict = tuple(_UpperCamelCase )
UpperCAmelCase_ : Any = get_pairs(_UpperCamelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(_UpperCamelCase , key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : str = bigram
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(_UpperCamelCase ):
try:
UpperCAmelCase_ : Optional[Any] = word.index(_UpperCamelCase , _UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : str = j
if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : Dict = tuple(_UpperCamelCase )
UpperCAmelCase_ : Dict = new_word
if len(_UpperCamelCase ) == 1:
break
else:
UpperCAmelCase_ : Optional[Any] = get_pairs(_UpperCamelCase )
UpperCAmelCase_ : Any = ' '.join(_UpperCamelCase )
UpperCAmelCase_ : Tuple = word
return word
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
UpperCAmelCase_ : Any = []
for token in re.findall(self.pat , _UpperCamelCase ):
UpperCAmelCase_ : Any = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase ).split(' ' ) )
return bpe_tokens
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
return self.decoder.get(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
UpperCAmelCase_ : List[str] = ''.join(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : int = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : Union[str, Any] = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase ) + '\n' )
UpperCAmelCase_ : Dict = 0
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
UpperCAmelCase_ : Any = token_index
writer.write(' '.join(_UpperCamelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , **_UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Union[str, Any] = ' ' + text
return (text, kwargs)
| 145 |
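One iteration of the BPE merge loop above in isolation, with a two-entry merge table invented for the demo: collect adjacent symbol pairs, pick the pair with the lowest merge rank, and fuse it.

word = ("h", "e", "l", "l", "o")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
bpe_ranks = {("l", "l"): 0, ("e", "ll"): 1}  # lower rank = merge learned earlier
best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
print(best)  # ('l', 'l'), so the word becomes ('h', 'e', 'll', 'o') on the next round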
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = '''umt5'''
_snake_case : Union[str, Any] = ['''past_key_values''']
def __init__( self , _UpperCamelCase=2_5_0_1_1_2 , _UpperCamelCase=5_1_2 , _UpperCamelCase=6_4 , _UpperCamelCase=1_0_2_4 , _UpperCamelCase=8 , _UpperCamelCase=None , _UpperCamelCase=6 , _UpperCamelCase=3_2 , _UpperCamelCase=1_2_8 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-6 , _UpperCamelCase=1.0 , _UpperCamelCase="gated-gelu" , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="T5Tokenizer" , _UpperCamelCase=True , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=0 , **_UpperCamelCase , ) -> List[Any]:
super().__init__(
is_encoder_decoder=_UpperCamelCase , tokenizer_class=_UpperCamelCase , tie_word_embeddings=_UpperCamelCase , pad_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , decoder_start_token_id=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[str] = d_model
UpperCAmelCase_ : Any = d_kv
UpperCAmelCase_ : Optional[int] = d_ff
UpperCAmelCase_ : List[Any] = num_layers
UpperCAmelCase_ : Optional[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Optional[int] = relative_attention_num_buckets
UpperCAmelCase_ : Dict = relative_attention_max_distance
UpperCAmelCase_ : Tuple = dropout_rate
UpperCAmelCase_ : Union[str, Any] = layer_norm_epsilon
UpperCAmelCase_ : Optional[int] = initializer_factor
UpperCAmelCase_ : List[str] = feed_forward_proj
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[Any] = self.feed_forward_proj.split('-' )
UpperCAmelCase_ : List[Any] = act_info[-1]
UpperCAmelCase_ : Union[str, Any] = act_info[0] == 'gated'
if len(_UpperCamelCase ) > 1 and act_info[0] != "gated" or len(_UpperCamelCase ) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ : Optional[int] = 'gelu_new'
@property
def __UpperCAmelCase ( self ) -> int:
return self.d_model
@property
def __UpperCAmelCase ( self ) -> Any:
return self.num_heads
@property
def __UpperCAmelCase ( self ) -> List[Any]:
return self.num_layers
class lowerCamelCase (_snake_case ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
UpperCAmelCase_ : Optional[int] = 'past_encoder_sequence + sequence'
UpperCAmelCase_ : str = {0: 'batch'}
UpperCAmelCase_ : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCAmelCase_ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
UpperCAmelCase_ : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCamelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCAmelCase ( self ) -> int:
return 1_3
@property
def __UpperCAmelCase ( self ) -> float:
return 5E-4
| 145 | 1 |
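The activation parsing in the config above is a plain split on '-': 'gated-gelu' yields a gated activation with dense function 'gelu' (then special-cased to 'gelu_new'), while a bare 'relu' leaves gating off. In isolation:

for feed_forward_proj in ("gated-gelu", "relu"):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    print(feed_forward_proj, is_gated_act, dense_act_fn)
# gated-gelu True gelu
# relu False relu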
import os
import pytest
from attr import dataclass
__UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : str
_snake_case : List[Any] = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
_snake_case : Any = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
_snake_case : Tuple = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __UpperCAmelCase ( self ) -> str:
return f"{self.framework}-transfromers-test"
@property
def __UpperCAmelCase ( self ) -> str:
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = SageMakerTestEnvironment(framework=request.cls.framework )
| 29 |
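A quick check that the PyTorch-side metric regexes above pull a value out of a typical trainer log line:

import re

line = "train_runtime = 123.45"
match = re.search(r"train_runtime.*=\D*(.*?)$", line)
print(match.group(1))  # 123.45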
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : int = 0
__lowerCamelCase : Union[str, Any] = [0]
__lowerCamelCase : Any = [0]
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
__lowerCamelCase : List[str] = [60]
__lowerCamelCase : Union[str, Any] = [10]
__lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 0 )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Optional[int] = 3
__lowerCamelCase : int = [1, 2, 3]
__lowerCamelCase : str = [3, 2, 1]
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 5 )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = 50
__lowerCamelCase : List[str] = [60, 1_00, 1_20]
__lowerCamelCase : List[str] = [10, 20, 30]
__lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ )
self.assertEqual(k.knapsack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 2_20 )
if __name__ == "__main__":
unittest.main()
| 185 | 0 |
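Sanity check of the 220 expected by the last test above: with capacity 50 and (weight, value) items (10, 60), (20, 100), (30, 120), the optimum takes the last two items for 100 + 120 = 220; a brute-force enumeration over all subsets agrees.

from itertools import combinations

items = [(10, 60), (20, 100), (30, 120)]
best = max(
    sum(value for _, value in subset)
    for r in range(len(items) + 1)
    for subset in combinations(items, r)
    if sum(weight for weight, _ in subset) <= 50
)
print(best)  # 220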
lowerCamelCase : Optional[int] = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase : Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 208 |
from ..utils import DummyObject, requires_backends
class A( metaclass=UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self : Any , *A_ : Any , **A_ : Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : Tuple , *A_ : Dict , **A_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : int , *A_ : Dict , **A_ : List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 208 | 1 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[Any] = """mask2former"""
a__ : Union[str, Any] = ["""swin"""]
a__ : Dict = {"""hidden_size""": """hidden_dim"""}
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 1_024 , SCREAMING_SNAKE_CASE = "relu" , SCREAMING_SNAKE_CASE = 6 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 2_048 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 255 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 12_544 , SCREAMING_SNAKE_CASE = 3.0 , SCREAMING_SNAKE_CASE = 0.75 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
snake_case : List[str] = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Tuple = backbone_config.pop("model_type" )
snake_case : Dict = CONFIG_MAPPING[backbone_model_type]
snake_case : Optional[int] = config_class.from_dict(SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
snake_case : List[str] = backbone_config
snake_case : Optional[int] = feature_size
snake_case : Optional[int] = mask_feature_size
snake_case : Optional[int] = hidden_dim
snake_case : List[str] = encoder_feedforward_dim
snake_case : Dict = activation_function
snake_case : Optional[Any] = encoder_layers
snake_case : Any = decoder_layers
snake_case : Optional[int] = num_attention_heads
snake_case : List[str] = dropout
snake_case : List[Any] = dim_feedforward
snake_case : Tuple = pre_norm
snake_case : int = enforce_input_projection
snake_case : str = common_stride
snake_case : List[Any] = ignore_value
snake_case : Optional[int] = num_queries
snake_case : Optional[int] = no_object_weight
snake_case : Dict = class_weight
snake_case : Tuple = mask_weight
snake_case : Tuple = dice_weight
snake_case : Tuple = train_num_points
snake_case : int = oversample_ratio
snake_case : Dict = importance_sample_ratio
snake_case : Tuple = init_std
snake_case : Dict = init_xavier_std
snake_case : List[Any] = use_auxiliary_loss
snake_case : Dict = feature_strides
snake_case : List[Any] = output_auxiliary_logits
snake_case : Union[str, Any] = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase_ ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = copy.deepcopy(self.__dict__ )
snake_case : str = self.backbone_config.to_dict()
snake_case : Optional[int] = self.__class__.model_type
return output
| 148 |
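The classmethod near the end of the config above is a thin alternative constructor. Assuming the anonymized names map to the public Mask2FormerConfig and SwinConfig classes (not verified here), usage would look like this sketch:

# from transformers import Mask2FormerConfig, SwinConfig
# backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
# config = Mask2FormerConfig.from_backbone_config(backbone)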
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : List[str] ):
# Initialise PyTorch model
snake_case : Optional[Any] = TaConfig.from_json_file(lowercase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case : Tuple = TaForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 148 | 1 |
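Given the three required arguments defined above, a typical invocation of the conversion script looks like the following; the script filename and all paths are placeholders:

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/tf_checkpoint \
#     --config_file /path/to/t5_config.json \
#     --pytorch_dump_path /path/to/pytorch_model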
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict ) -> int:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = hf_hub_url(repo_id=_UpperCAmelCase , path=_UpperCAmelCase , revision=_UpperCAmelCase )
assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_UpperCAmelCase )}""" | 359 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "rwkv"
snake_case__ = {"max_position_embeddings": "context_length"}
def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any]=50277 , UpperCAmelCase : Dict=1024 , UpperCAmelCase : int=4096 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : str=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=1E-5 , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : int=0 , UpperCAmelCase : Tuple=6 , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[Any]=True , **UpperCAmelCase : Any , ):
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : List[Any] = context_length
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowerCamelCase : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowerCamelCase : str = layer_norm_epsilon
__lowerCamelCase : Dict = rescale_every
__lowerCamelCase : Optional[Any] = use_cache
__lowerCamelCase : int = bos_token_id
__lowerCamelCase : Tuple = eos_token_id
super().__init__(
            tie_word_embeddings=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
| 64 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase_ ( lowerCamelCase__ ):
return (data["data"], data["target"])
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = XGBClassifier()
classifier.fit(lowerCamelCase__ , lowerCamelCase__ )
return classifier
def lowerCamelCase_ ( ):
lowerCamelCase_ = load_iris()
lowerCamelCase_ , lowerCamelCase_ = data_handling(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = train_test_split(
lowerCamelCase__ , lowerCamelCase__ , test_size=0.25 )
lowerCamelCase_ = iris["target_names"]
# Create an XGBoost Classifier from the training data
lowerCamelCase_ = xgboost(lowerCamelCase__ , lowerCamelCase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , display_labels=lowerCamelCase__ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 19 |
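With test_size=0.25 above, scikit-learn holds out ceil(150 * 0.25) = 38 of the 150 iris rows; a quick shape check under that assumption:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

features, targets = load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25, random_state=0)
print(x_train.shape, x_test.shape)  # (112, 4) (38, 4)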
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 19 | 1 |
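set_src_lang_special_tokens above clears the prefix tokens and sets the suffix to [</s>, lang_code], so encoded source text ends with the eos id followed by the language-code id. A sketch, assuming network access to the checkpoint (left as comments, not executed):

# from transformers import MBartTokenizerFast
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
# ids = tok("Hello").input_ids
# ids[-2:] == [tok.eos_token_id, tok.convert_tokens_to_ids("en_XX")]  # True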
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( A ) -> List[Any]:
snake_case = 3_8_4
if "tiny" in model_name:
snake_case = [3, 3, 9, 3]
snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
snake_case = 5_1_2
if "large" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
snake_case = 7_6_8
if "xlarge" in model_name:
snake_case = [3, 3, 2_7, 3]
snake_case = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
snake_case = 1_0_2_4
# set label information
snake_case = 1_5_0
snake_case = 'huggingface/label-files'
snake_case = 'ade20k-id2label.json'
snake_case = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = {v: k for k, v in idalabel.items()}
snake_case = ConvNextConfig(
depths=A , hidden_sizes=A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
snake_case = UperNetConfig(
backbone_config=A , auxiliary_in_channels=A , num_labels=A , idalabel=A , labelaid=A , )
return config
def __magic_name__ ( A ) -> Dict:
snake_case = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
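
# Illustrative self-check (added, not in the original script): rename_key moves
# a value to a new key in place.
_toy_state = {"backbone.norm0.weight": 0}
rename_key(_toy_state, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
assert "backbone.hidden_states_norms.stage1.weight" in _toy_state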
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
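
# Example invocation (added; script filename and output path are placeholders):
#   python convert_upernet_convnext.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny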
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 356 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
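    # Added example: bucket indices are truncated with int(), so floats work too.
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]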
| 332 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
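
# Minimal sketch of the lazy-import pattern used above (illustrative; not
# transformers' actual _LazyModule): attribute access triggers the real
# import, so importing the package stays cheap until a class is first touched.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import ".<submodule>" relative to this package on first access.
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)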
| 21 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
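
# Quick usage sketch (added; assumes transformers is installed): attribute_map
# lets generic names resolve to the Transfo-XL-specific attributes above.
if __name__ == "__main__":
    cfg = TransfoXLConfig(d_model=512, n_layer=6)
    assert cfg.hidden_size == 512  # mapped onto d_model
    assert cfg.num_hidden_layers == 6  # mapped onto n_layer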
| 334 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 |
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
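
# Example invocation (added; model name and paths are placeholders):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream.ckpt \
#       --model_dump_path ./converted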
| 258 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processor_class = ViTImageProcessor if is_vision_available() else None
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a random PIL image for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =self.get_image_processor()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
__lowercase =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowerCAmelCase)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =self.get_image_processor()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
__lowercase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__lowercase =self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0)
__lowercase =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =self.prepare_image_inputs()
__lowercase =image_processor(_lowerCAmelCase , return_tensors='np')
__lowercase =processor(images=_lowerCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='test'
__lowercase =processor(text=_lowerCAmelCase)
__lowercase =tokenizer(_lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase ='test'
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase):
processor()
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowercase =processor.char_decode(_lowerCAmelCase)
__lowercase =tokenizer.batch_decode(_lowerCAmelCase)
__lowercase =[seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =None
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCAmelCase , images=_lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase)
__lowercase =torch.randn(1 , 2_7 , 3_8)
__lowercase =torch.randn(1 , 2_7 , 5_0_2_5_7)
__lowercase =torch.randn(1 , 2_7 , 3_0_5_2_2)
__lowercase =processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 166 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =0
@slow
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
# Check that tokenizer_type ≠ model_type
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
with pytest.raises(_lowerCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__lowercase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =TOKENIZER_MAPPING.values()
__lowercase =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowerCAmelCase) , _lowerCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowerCAmelCase)
__lowercase ='Hello, world. How are you?'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__lowercase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowerCAmelCase)
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =get_tokenizer_config('bert-base-cased')
__lowercase =config.pop('_commit_hash' , _lowerCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase =get_tokenizer_config(_lowerCAmelCase)
self.assertDictEqual(_lowerCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =get_tokenizer_config(_lowerCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
__lowercase =CustomTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
# Can register in two steps
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase =BertTokenizerFast.from_pretrained(_lowerCAmelCase)
bert_tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =CustomTokenizerFast.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = False
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# If remote code is not set, the default is to use local
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__lowercase =AutoTokenizer.from_pretrained('bert-base')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , revision='aaaaaa')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 166 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_UpperCAmelCase : Tuple = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_UpperCAmelCase : int = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_UpperCAmelCase : Union[str, Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
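
# Worked example of the CoNLL average computed above (illustrative numbers):
# with MUC, B-cubed and CEAFe F1 scores of 0.70, 0.60 and 0.50,
# conll_score = (0.70 + 0.60 + 0.50) / 3 * 100 == 60.0
assert round((0.70 + 0.60 + 0.50) / 3 * 100, 6) == 60.0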
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
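
# Added usage sketch (illustrative, not part of the original metric file): this
# mirrors the doctest in _KWARGS_DESCRIPTION above, where `words` is a list of
# CoNLL-formatted lines as shown there.
#
#     coval = datasets.load_metric("coval")
#     results = coval.compute(predictions=[words], references=[words])
#     assert results["conll_score"] == 100.0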
| 158 | 1 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
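    # Added randomized sanity check (illustrative): bucket_sort must agree with
    # the built-in sorted() on arbitrary integer inputs.
    import random

    rand_list = [random.randint(-100, 100) for _ in range(50)]
    assert bucket_sort(rand_list) == sorted(rand_list)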
| 26 |
def topological_sort(graph):
    """Perform topological sorting of the graph (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
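
# Added illustrative case (not in the original file): in a cyclic graph no
# vertex ever reaches indegree 0, so cnt stays below len(graph) and the
# function reports the cycle.
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"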
| 13 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
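
# (Added note) enable_full_determinism() seeds the RNGs and forces
# deterministic CUDA algorithms, which is why the hard-coded output slices
# asserted in the tests below are expected to reproduce exactly.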
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15,
            output_type="np", use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
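
# (Added note) ANY(...) is a test helper that compares equal to any value of
# the given type, so the batched assertions below only pin down the output
# structure: with a tiny random CLIP checkpoint every candidate label scores
# roughly 1/3 and the label order is not guaranteed.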
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
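
# (Added note) The stub above only provides `Image.open` so that this module
# still imports when Pillow is unavailable; the vision-dependent tests are
# skipped in that case via @require_vision.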
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        ) | 329 | 0 |
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
a_ = input('\nEnter an Infix Equation = ') # Input an Infix equation
a_ = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)') | 152 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of the content field, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the alphanumeric fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by keyword scan and word counts."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file has none of the keywords for function, class, for loop or while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is not filled with intermediates."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
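
# Added illustrative check (hypothetical snippet, not in the original script):
# because get_hash strips all whitespace before hashing, two files that differ
# only in formatting deduplicate to the same hash.
#
#     assert get_hash({"content": "a =  1"})["hash"] == get_hash({"content": "a = 1"})["hash"]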
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}") | 152 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__a = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # iterative segment tree layout: internal nodes in st[1..N-1], leaves in st[N..2N-1]
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
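
# Added usage sketch (illustrative): a max-tree over a small array, queried on
# the inclusive range [1, 3].
#
#     tree = SegmentTree([5, 1, 9, 4], max)
#     assert tree.query(1, 3) == 9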
if __name__ == "__main__":
from functools import reduce
__a = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__a = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__a = SegmentTree(test_array, min)
__a = SegmentTree(test_array, max)
__a = SegmentTree(test_array, lambda a, b: a + b)
def __snake_case( ) -> None:
for i in range(len(_lowerCAmelCase ) ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
snake_case__ : Any = reduce(_lowerCAmelCase , test_array[i : j + 1] )
snake_case__ : Union[str, Any] = reduce(_lowerCAmelCase , test_array[i : j + 1] )
snake_case__ : Optional[Any] = reduce(lambda _lowerCAmelCase , _lowerCAmelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_lowerCAmelCase , _lowerCAmelCase )
assert max_range == max_segment_tree.query(_lowerCAmelCase , _lowerCAmelCase )
assert sum_range == sum_segment_tree.query(_lowerCAmelCase , _lowerCAmelCase )
test_all_segments()
for index, value in test_updates.items():
__a = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 43 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 43 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the Pythagorean theorem to impedance, resistance and reactance:
    given two of the three values (the missing one passed as 0), compute the third.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Find the Pythagorean triplet a < b < c with a + b + c == n and
    return the product a * b * c.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
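
# Added note: for n = 1000 the unique triplet is (a, b, c) = (200, 375, 425),
# since 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 180625 = 425**2, giving
# the product 31875000.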
if __name__ == "__main__":
print(f'{solution() = }')
| 321 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
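
# (Added note) _LazyModule defers importing the heavy torch/TF submodules until
# one of the names listed in _import_structure is actually accessed, which
# keeps the top-level `import transformers` fast.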
| 315 |
def upper(word: str) -> str:
    """Convert the entire string to ASCII uppercase letters."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
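
# (Added note) The suite below runs the IF inpainting pipeline end to end on
# tiny random components; @skip_mps excludes it on the Apple MPS backend.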
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2) | 6 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 101 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 134 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            result == expected
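
# (Added note) num_bytes values are compared with is_1percent_close because
# serialized byte counts can drift slightly across library versions, while
# split names and example counts must match exactly.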
| 134 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
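
# Added usage sketch (illustrative):
#
#     tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     ids = tok("hello world")["input_ids"]  # [CLS] ... [SEP], per the methods above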
| 56 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
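
# Added note: the secant update above is
#     x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)),
# and for f(x) = x**3 - 2x - 5 with starting points 3 and 3.5 it converges to
# the classical Wallis root ~2.0945514815.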
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7,
            out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
            attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
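
# (Added note) expected_height_width equals low_res_image.size[0] * 4 above
# because the x4 upscaler decodes at four times the conditioning resolution.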
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : int ) -> Dict:
__magic_name__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__magic_name__ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
__magic_name__ : Any = 'stabilityai/stable-diffusion-x4-upscaler'
__magic_name__ : Dict = StableDiffusionUpscalePipeline.from_pretrained(_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__magic_name__ : Optional[int] = 'a cat sitting on a park bench'
__magic_name__ : List[Any] = torch.manual_seed(0 )
__magic_name__ : List[Any] = pipe(
prompt=_A , image=_A , generator=_A , output_type='np' , )
__magic_name__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__magic_name__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
__magic_name__ : Optional[Any] = 'stabilityai/stable-diffusion-x4-upscaler'
__magic_name__ : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
_A , torch_dtype=torch.float16 , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__magic_name__ : str = 'a cat sitting on a park bench'
__magic_name__ : Tuple = torch.manual_seed(0 )
__magic_name__ : Any = pipe(
prompt=_A , image=_A , generator=_A , output_type='np' , )
__magic_name__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
__magic_name__ : str = 'stabilityai/stable-diffusion-x4-upscaler'
__magic_name__ : int = StableDiffusionUpscalePipeline.from_pretrained(
_A , torch_dtype=torch.float16 , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = 'a cat sitting on a park bench'
__magic_name__ : Dict = torch.manual_seed(0 )
__magic_name__ : Dict = pipe(
prompt=_A , image=_A , generator=_A , num_inference_steps=5 , output_type='np' , )
__magic_name__ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 275 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase :str = logging.get_logger(__name__)
lowerCAmelCase :str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
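# Note: the '*' wildcard in the encoder-layer entries above stands in for the
# layer index; the loading helper further below extracts the index from the
# fairseq parameter name and substitutes it (e.g. 'encoder.layers.3....' maps
# '*' to '3').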
lowerCAmelCase :List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Any = {}
with open(lowerCAmelCase , 'r' ) as file:
for line_number, line in enumerate(lowerCAmelCase ):
__magic_name__ : Optional[Any] = line.strip()
if line:
__magic_name__ : Optional[int] = line.split()
__magic_name__ : Any = line_number
__magic_name__ : Union[str, Any] = words[0]
__magic_name__ : Dict = value
return result
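# A minimal sketch of the label file this helper appears to expect (an assumption
# based on the parsing above: one whitespace-separated record per line, keeping
# the first token and using the line number as the key):
#
#     neutral
#     happy
#     sad
#
# would yield {0: 'neutral', 1: 'happy', 2: 'sad'}, later used as the id2label
# mapping for the sequence-classification head.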
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
for attribute in key.split('.' ):
__magic_name__ : Optional[Any] = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
__magic_name__ : Optional[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__magic_name__ : List[Any] = 'param'
if weight_type is not None and weight_type != "param":
__magic_name__ : List[str] = getattr(lowerCAmelCase , lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
__magic_name__ : Tuple = hf_pointer
for attribute in hf_param_name.split('.' ):
__magic_name__ : str = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Union[str, Any] = shape_pointer.shape
# let's reduce dimension
__magic_name__ : int = value[0]
else:
__magic_name__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__magic_name__ : Optional[Any] = value
elif weight_type == "weight_g":
__magic_name__ : List[str] = value
elif weight_type == "weight_v":
__magic_name__ : Optional[int] = value
elif weight_type == "bias":
__magic_name__ : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__magic_name__ : Optional[int] = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = value
else:
__magic_name__ : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
__magic_name__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
__magic_name__ : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__magic_name__ : Dict = 'param'
if weight_type is not None and weight_type != "param":
__magic_name__ : Union[str, Any] = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__magic_name__ : Optional[Any] = '.'.join([key, hf_param_name] )
else:
__magic_name__ : int = key
__magic_name__ : int = value if 'lm_head' in full_key else value[0]
lowerCAmelCase :int = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
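# PARAM_MAPPING appears to cover adapter sub-module parameters (two linear layers
# plus a layer norm); names ending in these keys are routed through the 'param'
# branch of the setters above, which indexes into the target module instead of
# assigning .weight/.bias directly.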
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple=None , lowerCAmelCase : Tuple=None ):
"""simple docstring"""
__magic_name__ : Dict = False
for key, mapped_key in MAPPING.items():
__magic_name__ : int = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__magic_name__ : Union[str, Any] = True
if "*" in mapped_key:
__magic_name__ : List[Any] = name.split(lowerCAmelCase )[0].split('.' )[-2]
__magic_name__ : List[str] = mapped_key.replace('*' , lowerCAmelCase )
if "weight_g" in name:
__magic_name__ : str = 'weight_g'
elif "weight_v" in name:
__magic_name__ : Optional[int] = 'weight_v'
elif "bias" in name:
__magic_name__ : int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__magic_name__ : List[str] = 'weight'
else:
__magic_name__ : Any = None
if hf_dict is not None:
rename_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return is_used
return is_used
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = fairseq_model.state_dict()
__magic_name__ : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__magic_name__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
__magic_name__ : Optional[int] = True
else:
__magic_name__ : str = load_wavaveca_layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : Any = full_name.split('conv_layers.' )[-1]
__magic_name__ : int = name.split('.' )
__magic_name__ : Any = int(items[0] )
__magic_name__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__magic_name__ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__magic_name__ : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__magic_name__ : Optional[int] = value
logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__magic_name__ : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCAmelCase )
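# type_id semantics above: 0 selects the conv weight/bias of a feature-extractor
# block, while 2 selects its layer norm; with group norm, only the first block
# (layer_id == 0) carries a norm, which is why the condition singles it out.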
@torch.no_grad()
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Any=False ):
"""simple docstring"""
if config_path is not None:
__magic_name__ : int = WavaVecaConfig.from_pretrained(lowerCAmelCase )
else:
__magic_name__ : List[str] = WavaVecaConfig()
if is_seq_class:
__magic_name__ : Any = read_txt_into_dict(lowerCAmelCase )
__magic_name__ : Optional[Any] = idalabel
__magic_name__ : Union[str, Any] = WavaVecaForSequenceClassification(lowerCAmelCase )
__magic_name__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
feature_extractor.save_pretrained(lowerCAmelCase )
elif is_finetuned:
if dict_path:
__magic_name__ : str = Dictionary.load(lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__magic_name__ : Dict = target_dict.pad_index
__magic_name__ : Union[str, Any] = target_dict.bos_index
__magic_name__ : Union[str, Any] = target_dict.eos_index
__magic_name__ : Union[str, Any] = len(target_dict.symbols )
__magic_name__ : Dict = os.path.join(lowerCAmelCase , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
__magic_name__ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__magic_name__ : Any = 0
__magic_name__ : Optional[int] = 1
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = WavaVecaCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase , )
__magic_name__ : Tuple = True if config.feat_extract_norm == 'layer' else False
__magic_name__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
__magic_name__ : List[Any] = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
__magic_name__ : Dict = WavaVecaForCTC(lowerCAmelCase )
else:
__magic_name__ : Tuple = WavaVecaForPreTraining(lowerCAmelCase )
if is_finetuned or is_seq_class:
__magic_name__ , __magic_name__ , __magic_name__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__magic_name__ : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__magic_name__ : Dict = fairseq.tasks.setup_task(lowerCAmelCase )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase )
__magic_name__ : Any = model[0].eval()
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase )
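# A hypothetical invocation of this conversion script (script name and paths are
# placeholders, not taken from the source):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small_960h.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base-960h \
#         --dict_path ./dict.ltr.txt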
if __name__ == "__main__":
lowerCAmelCase :str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
lowerCAmelCase :Dict = parser.parse_args()
lowerCAmelCase :Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 275 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case_ = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
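# The pattern above keeps importing this package cheap: only _import_structure is
# declared eagerly, while the heavy torch/TF modules are resolved by _LazyModule
# on first attribute access (or imported eagerly under TYPE_CHECKING so that
# static analyzers still see the real symbols).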
| 78 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase_ : Union[str, Any] = pytest.mark.integration
UpperCAmelCase_ : List[Any] = {'comet'}
UpperCAmelCase_ : int = importlib.util.find_spec('fairseq') is not None
UpperCAmelCase_ : Optional[Any] = {'code_eval'}
UpperCAmelCase_ : Optional[int] = os.name == 'nt'
UpperCAmelCase_ : Dict = {'bertscore', 'frugalscore', 'perplexity'}
UpperCAmelCase_ : Dict = importlib.util.find_spec('transformers') is not None
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_snake_case , _snake_case , _snake_case )
@local
class lowercase__ ( parameterized.TestCase ):
'''simple docstring'''
A_ : Optional[int] = {}
A_ : Union[str, Any] = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : str = """[...]"""
_SCREAMING_SNAKE_CASE : Any = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path )
_SCREAMING_SNAKE_CASE : Optional[int] = datasets.load.import_main_class(metric_module.__name__ , dataset=__snake_case )
# check parameters
_SCREAMING_SNAKE_CASE : Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__snake_case , metric_module.__name__ ):
with self.use_local_metrics():
try:
_SCREAMING_SNAKE_CASE : int = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = """[...]"""
_SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path )
# run doctest
with self.use_local_metrics():
_SCREAMING_SNAKE_CASE : List[str] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__snake_case ):
yield
else:
yield
@contextmanager
def UpperCAmelCase_ ( self ):
def load_local_metric(__snake_case , *__snake_case , **__snake_case ):
return load_metric(os.path.join("""metrics""" , __snake_case ) , *__snake_case , **__snake_case )
with patch("""datasets.load_metric""" ) as mock_load_metric:
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_local_metric
yield
@classmethod
def UpperCAmelCase_ ( cls , __snake_case ):
def wrapper(__snake_case ):
_SCREAMING_SNAKE_CASE : Any = contextmanager(__snake_case )
_SCREAMING_SNAKE_CASE : int = patcher
return patcher
return wrapper
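# The classmethod above implements a small decorator registry: each patcher is
# wrapped as a context manager and stored in INTENSIVE_CALLS_PATCHER under its
# metric name, letting the doctest runner swap out expensive model downloads and
# forward passes per metric (see the bleurt/bertscore/comet patchers below).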
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class lowercase__ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case ):
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
_SCREAMING_SNAKE_CASE : Any = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE__ ) )
# mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
_SCREAMING_SNAKE_CASE : Any = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def load_from_checkpoint(SCREAMING_SNAKE_CASE__ ):
class lowercase__ :
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case , *__snake_case , **__snake_case ):
assert len(__snake_case ) == 2
_SCREAMING_SNAKE_CASE : Dict = [0.19, 0.92]
return scores, sum(__snake_case ) / len(__snake_case )
return Model()
# mock load_from_checkpoint which is supposed to download a comet model checkpoint
with patch("""comet.download_model""" ) as mock_download_model:
_SCREAMING_SNAKE_CASE : Any = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
_SCREAMING_SNAKE_CASE : List[str] = load_from_checkpoint
yield
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
_SCREAMING_SNAKE_CASE : List[str] = """ERROR"""
_SCREAMING_SNAKE_CASE : Tuple = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(SCREAMING_SNAKE_CASE__ , match=re.escape(SCREAMING_SNAKE_CASE__ ) ):
metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE__ )
| 200 | 0 |
def UpperCamelCase (number: int ) -> int:
if not isinstance(number , int ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
A__ : Optional[int] = 0
while number:
# Clearing the lowest set bit jumps straight to the next set bit instead of
# checking every bit position, so the loop runs once per `1` bit rather than
# a fixed 32 times.
number &= number - 1
count += 1
return count
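# Worked example of Brian Kernighan's trick used above: for number = 13 (0b1101),
#   13 & 12 = 12 (0b1100), 12 & 11 = 8 (0b1000), 8 & 7 = 0,
# so the loop body executes three times and the count returned is 3.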
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
import argparse
from collections import defaultdict
def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[int] , lowercase_: Optional[Any] , lowercase_: Union[str, Any] , lowercase_: Any ) -> int:
A__ : Optional[Any] = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(lowercase_ , """r""" ) as f:
A__ : Union[str, Any] = f.readlines()
A__ : str = f"""class {class_name}("""
A__ : Optional[Any] = f"""{4 * ' '}def {test_name}("""
A__ : Union[str, Any] = f"""{8 * ' '}{correct_line.split()[0]}"""
A__ : Optional[int] = f"""{16 * ' '}{correct_line.split()[0]}"""
A__ : int = False
A__ : str = False
A__ : Tuple = False
A__ : Optional[int] = False
A__ : Optional[Any] = 0
A__ : Dict = 0
A__ : List[str] = []
for line in lines:
if line.startswith(lowercase_ ):
A__ : Dict = True
elif in_class and line.startswith(lowercase_ ):
A__ : Optional[Any] = True
elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )):
A__ : Tuple = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
A__ : Any = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
A__ : Dict = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * ' '}{correct_line}""" )
A__ : List[str] = False
else:
new_lines.append(lowercase_ )
with open(lowercase_ , """w""" ) as f:
for line in new_lines:
f.write(lowercase_ )
def UpperCamelCase (lowercase_: List[str] , lowercase_: Optional[Any]=None ) -> Any:
if fail is not None:
with open(lowercase_ , """r""" ) as f:
A__ : Dict = {l.strip() for l in f.readlines()}
else:
A__ : List[str] = None
with open(lowercase_ , """r""" ) as f:
A__ : int = f.readlines()
A__ : Union[str, Any] = defaultdict(lowercase_ )
for line in correct_lines:
A__ , A__ , A__ , A__ : Optional[int] = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
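# An assumed example line for --correct_filename, following the four ';'-separated
# fields split off above (file;class;test;corrected line); the values here are
# purely illustrative:
#
#     tests/test_modeling_foo.py;FooModelTest;test_logits;self.assertEqual(output.shape, (1, 10))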
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
A_ : Optional[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 141 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[Any]=13 ,A_ : List[Any]=30 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=3 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : List[str]=32 ,A_ : str=2 ,A_ : str=4 ,A_ : int=37 ,A_ : Tuple="gelu" ,A_ : Any=0.1 ,A_ : int=0.1 ,A_ : str=10 ,A_ : List[str]=0.02 ,A_ : int=3 ,A_ : List[Any]=None ,A_ : int=2 ,) -> Dict:
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = scope
A = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A = (image_size // patch_size) ** 2
A = num_patches + 2
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Any ,A_ : Any ,A_ : int ) -> int:
A = TFDeiTModel(config=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = TFDeiTForMaskedImageModeling(config=A_ )
A = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A = 1
A = TFDeiTForMaskedImageModeling(A_ )
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(A_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Tuple ) -> List[Any]:
A = self.type_sequence_label_size
A = TFDeiTForImageClassification(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = TFDeiTForImageClassification(A_ )
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_lowerCamelCase: Dict = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: Dict = False
_lowerCamelCase: Any = False
_lowerCamelCase: Dict = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = TFDeiTModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ ,tf.keras.layers.Dense ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[str] ,A_ : Any ,A_ : List[Any]=False ) -> str:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='tf' )
# forward pass
A = model(**A_ )
# verify the logits
A = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) ) | 74 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A =logging.get_logger(__name__)
A =TypeVar('DatasetType', Dataset, IterableDataset)
def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_a ):
if not isinstance(_a , (Dataset, IterableDataset) ):
if isinstance(_a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(_a )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset)
)
elif not isinstance(_a , _a ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_a , _a , _a , info=_a , split=_a , stopping_strategy=_a )
else:
return _interleave_iterable_datasets(
_a , _a , _a , info=_a , split=_a , stopping_strategy=_a )
def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_a ):
if not isinstance(_a , (Dataset, IterableDataset) ):
if isinstance(_a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(_a )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset)
)
elif not isinstance(_a , _a ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a )
else:
return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
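# A minimal usage sketch, assuming these helpers back the public
# interleave_datasets / concatenate_datasets API of the `datasets` library:
#
#     from datasets import Dataset, interleave_datasets
#     d1 = Dataset.from_dict({'x': [0, 1, 2]})
#     d2 = Dataset.from_dict({'x': [10, 11, 12]})
#     mixed = interleave_datasets(
#         [d1, d2], probabilities=[0.7, 0.3], seed=42, stopping_strategy='all_exhausted'
#     )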
| 34 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowerCamelCase ):
lowerCamelCase_ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase_ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowerCamelCase ):
lowerCamelCase_ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase_ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__lowerCamelCase )
lowerCamelCase_ = FlaxBertModel.from_pretrained(__lowerCamelCase )
lowerCamelCase_ = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowercase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
for model_name in ["roberta-base", "roberta-large"]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__lowerCamelCase )
lowerCamelCase_ = FlaxRobertaModel.from_pretrained(__lowerCamelCase )
lowerCamelCase_ = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowercase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
def SCREAMING_SNAKE_CASE_( self ) -> Any:
with self.assertRaisesRegex(
__lowerCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
with self.assertRaisesRegex(
__lowerCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained(__lowerCamelCase , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE_( self ) -> str:
with self.assertRaisesRegex(
__lowerCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
with self.assertRaisesRegex(__lowerCamelCase , "Use `from_pt=True` to load this model" ):
lowerCamelCase_ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A ={
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 47 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = ""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
for keychar, cipherchar in zip(cycle(lowerCamelCase ) , lowerCamelCase ):
UpperCAmelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCamelCase )
return decoded
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
for key in product(lowerCamelCase , repeat=3 ):
UpperCAmelCase__ = try_key(lowerCamelCase , lowerCamelCase )
if encoded is not None:
possibles.append(lowerCamelCase )
return possibles
def a_ ( lowerCamelCase , lowerCamelCase ):
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCamelCase = "p059_cipher.txt" ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = Path(lowerCamelCase ).parent.joinpath(lowerCamelCase ).read_text(encoding='utf-8' )
UpperCAmelCase__ = [int(lowerCamelCase ) for number in data.strip().split(',' )]
UpperCAmelCase__ = filter_valid_chars(lowerCamelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase__ = filter_common_word(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
break
UpperCAmelCase__ = possibles[0]
return sum(ord(lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
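# Worked example: 349 is not counted as Lychrel, since 349 + 943 = 1292,
# 1292 + 2921 = 4213, and 4213 + 3124 = 7337 reaches a palindrome after three
# iterations, well inside the 50-iteration cutoff.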
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
def A_ ( snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : str = (1 + 2_4 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
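# Derivation of the check above: P(n) = n * (3n - 1) / 2 = x rearranges to
# 3n^2 - n - 2x = 0, whose positive root is n = (1 + sqrt(1 + 24x)) / 6; x is
# pentagonal exactly when that n is a positive integer, i.e. (1 + root) / 6 has
# zero fractional part.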
def A_ ( snake_case_ : int = 5_0_0_0 ):
'''simple docstring'''
UpperCamelCase : int = [(i * (3 * i - 1)) // 2 for i in range(1 ,snake_case_ )]
for i, pentagonal_i in enumerate(snake_case_ ):
for j in range(snake_case_ ,len(snake_case_ ) ):
UpperCamelCase : Union[str, Any] = pentagonal_nums[j]
UpperCamelCase : Dict = pentagonal_i + pentagonal_j
UpperCamelCase : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(snake_case_ ) and is_pentagonal(snake_case_ ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 27 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A : int = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__A : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 27 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _a ( lowerCamelCase_ ):
'''simple docstring'''
A : Dict = ["""image_processor"""]
A : int = """SamImageProcessor"""
def __init__( self, A ):
'''simple docstring'''
super().__init__(_UpperCAmelCase )
SCREAMING_SNAKE_CASE : int = self.image_processor
SCREAMING_SNAKE_CASE : Dict = -10
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor.size['longest_edge']
def __call__( self, A=None, A=None, A=None, A=None, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(
_UpperCAmelCase, return_tensors=_UpperCAmelCase, **_UpperCAmelCase, )
# pop arguments that are not used in the forward pass but are used nevertheless
SCREAMING_SNAKE_CASE : List[str] = encoding_image_processor['original_sizes']
if hasattr(_UpperCAmelCase, 'numpy' ): # Checks if Torch or TF tensor
SCREAMING_SNAKE_CASE : Tuple = original_sizes.numpy()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._check_and_preprocess_points(
input_points=_UpperCAmelCase, input_labels=_UpperCAmelCase, input_boxes=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE : Dict = self._normalize_and_convert(
_UpperCAmelCase, _UpperCAmelCase, input_points=_UpperCAmelCase, input_labels=_UpperCAmelCase, input_boxes=_UpperCAmelCase, return_tensors=_UpperCAmelCase, )
return encoding_image_processor
def UpperCamelCase_ ( self, A, A, A=None, A=None, A=None, A="pt", ):
'''simple docstring'''
if input_points is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Dict = [
self._normalize_coordinates(self.target_size, _UpperCAmelCase, original_sizes[0] ) for point in input_points
]
else:
SCREAMING_SNAKE_CASE : str = [
self._normalize_coordinates(self.target_size, _UpperCAmelCase, _UpperCAmelCase )
for point, original_size in zip(_UpperCAmelCase, _UpperCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._pad_points_and_labels(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(_UpperCAmelCase )
if input_labels is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE : Dict = [
self._normalize_coordinates(self.target_size, _UpperCAmelCase, original_sizes[0], is_bounding_box=_UpperCAmelCase )
for box in input_boxes
]
else:
SCREAMING_SNAKE_CASE : str = [
self._normalize_coordinates(self.target_size, _UpperCAmelCase, _UpperCAmelCase, is_bounding_box=_UpperCAmelCase )
for box, original_size in zip(_UpperCAmelCase, _UpperCAmelCase )
]
SCREAMING_SNAKE_CASE : Optional[int] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
SCREAMING_SNAKE_CASE : int = torch.from_numpy(_UpperCAmelCase )
# boxes batch size of 1 by default
SCREAMING_SNAKE_CASE : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(_UpperCAmelCase )
# boxes batch size of 1 by default
SCREAMING_SNAKE_CASE : Dict = tf.expand_dims(_UpperCAmelCase, 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
SCREAMING_SNAKE_CASE : str = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})

        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point list to the length of the longest one, extending the labels with the pad value."""
        expected_nb_points = max(point.shape[0] for point in input_points)
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale (x, y) coordinates from the original image size to the preprocessed target size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """Validate user-supplied prompts and convert them to lists of numpy arrays."""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
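
# --- Added illustrative sketch (not part of the original processor) ---
# Standalone demonstration of the rescaling rule used in `_normalize_coordinates`:
# x coordinates scale with the width ratio, y coordinates with the height ratio.
# The image sizes and the point below are illustrative assumptions only.
def _rescale_points_demo():
    import numpy as np  # already imported in this module; repeated so the sketch is self-contained

    points = np.array([[320.0, 240.0]])
    old_h, old_w = 480, 640
    new_h, new_w = 768, 1024  # a 640x480 image resized so its longest edge is 1024
    scaled = points.copy()
    scaled[..., 0] *= new_w / old_w  # 320 -> 512
    scaled[..., 1] *= new_h / old_h  # 240 -> 384
    return scaled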
| 251 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """BLEU metric, wrapping the reference implementation from the TensorFlow NMT repository."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
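
# Added usage sketch (not part of the original metric): with tokenized inputs,
#   bleu = datasets.load_metric("bleu")
#   bleu.compute(predictions=[["hello", "there"]], references=[[["hello", "there"]]])
# a perfect match yields a BLEU score of 1.0, as in the docstring example above.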
| 346 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    """Configuration shared by the DPR context encoder, question encoder and reader."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
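
# Added usage sketch (not part of the original file): inside the transformers
# package this config can be instantiated directly, e.g.
#   config = DPRConfig(projection_dim=128)  # 128 is an illustrative value only
# and passed to any of the three DPR encoder/reader models.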
| 358 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 19 | 0 |
g: float = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on a submerged object: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
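    # Added illustrative check (not in the original): a fully submerged 0.5 m^3
    # object in fresh water (~1000 kg/m^3) feels 1000 * 9.80665 * 0.5 ≈ 4903.3 N.
    print(archimedes_principle(fluid_density=1_000, volume=0.5))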
| 51 |
"""simple docstring"""
def max_subarray_product(numbers: list[int]) -> int:
    """Return the largest product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
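

# Added illustrative usage (not part of the original): [2, 3, -2, 4] has maximum
# subarray product 6 (from the prefix [2, 3]); a zero resets the running product.
if __name__ == "__main__":
    assert max_subarray_product([2, 3, -2, 4]) == 6
    assert max_subarray_product([-2, 0, -1]) == 0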
| 150 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"could not parse string as bool {string}")
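
    # Added note (not part of the original script): parse_bool maps the literal
    # strings "True"/"False" to booleans, e.g. parse_bool("True") is True; any
    # other value raises ValueError, which argparse reports as a usage error.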
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 73 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix with even dimensions into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # quirk kept from the original: two already-square operands are returned unchanged
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
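    # Added sanity check (not part of the original): strassen() pads its inputs in
    # place, so fresh small matrices are used here and compared against a naive
    # triple-loop product (the values are illustrative only).
    m1 = [[2, 3], [4, 5], [6, 7], [8, 9]]  # 4x2
    m2 = [[1, 2, 3], [4, 5, 6]]  # 2x3
    naive = [
        [sum(m1[i][k] * m2[k][j] for k in range(2)) for j in range(3)]
        for i in range(4)
    ]
    assert strassen(m1, m2) == naive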
| 73 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original FLAVA weights into the transformers design."""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 149 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149 | 1 |
def solution(n: int = 1000) -> int:
    """Count the first `n` expansions of sqrt(2)'s continued fraction whose
    numerator has more digits than its denominator (Project Euler problem 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
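    # Added illustrative check (not in the original): the first eight expansions are
    # 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985; only the eighth has
    # a numerator with more digits than its denominator.
    assert solution(8) == 1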
| 187 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token='<unk>',
        sep_token='[SEP]',
        pad_token='<pad>',
        cls_token='[CLS]',
        mask_token='[MASK]',
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] tokens around one or two token-id sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create token type ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocabulary file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
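

# Added usage sketch (not part of the original file): within transformers,
#   tok = FNetTokenizerFast.from_pretrained('google/fnet-base')
#   tok.save_pretrained('./fnet')  # writes tokenizer.json, plus spiece.model when available
# saving the slow vocabulary requires the sentencepiece model file to be present.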
| 134 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    """Pipeline that denoises trajectories with a UNet while steering sampling with a value function."""

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)['prev_sample']

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations')
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions')

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 365 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Processor for Bark, wrapping a text tokenizer and optional speaker-embedding voice presets."""

    tokenizer_class = 'AutoTokenizer'
    attributes = ['tokenizer']

    preset_shape = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path='speaker_embeddings_path.json', **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop('subfolder', None),
                cache_dir=kwargs.pop('cache_dir', None),
                force_download=kwargs.pop('force_download', False),
                proxies=kwargs.pop('proxies', None),
                resume_download=kwargs.pop('resume_download', False),
                local_files_only=kwargs.pop('local_files_only', False),
                use_auth_token=kwargs.pop('use_auth_token', None),
                revision=kwargs.pop('revision', None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,
                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the JSON
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'''
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path='speaker_embeddings_path.json',
        speaker_embeddings_directory='speaker_embeddings',
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, 'v2'), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != 'repo_or_path':
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'], speaker_embeddings_directory, f'{prompt_key}_{key}'
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'{prompt_key}_{key}.npy')

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), 'w') as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].'
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'),
                voice_preset_paths[key],
                subfolder=kwargs.pop('subfolder', None),
                cache_dir=kwargs.pop('cache_dir', None),
                force_download=kwargs.pop('force_download', False),
                proxies=kwargs.pop('proxies', None),
                resume_download=kwargs.pop('resume_download', False),
                local_files_only=kwargs.pop('local_files_only', False),
                use_auth_token=kwargs.pop('use_auth_token', None),
                revision=kwargs.pop('revision', None),
            )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist,
                    no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.'''
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors='pt',
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('.npz'):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding='max_length',
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset

        return encoded_text
| 12 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
__magic_name__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
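# Example invocation of the conversion entry point above (the script file name and
# output directory are assumptions for illustration):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers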
| 100 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
a_ = datasets.logging.get_logger(__name__)
a_ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
a_ = '\\nBLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
a_ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to `bleurt-base-128` if none is specified.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
a_ = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __magic_name__ ( self : Any ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __magic_name__ ( self : str , __lowercase : Any ) -> List[Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
SCREAMING_SNAKE_CASE__ : Tuple ='''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
# download the model checkpoint specified by self.config_name and set up the scorer
SCREAMING_SNAKE_CASE__ : List[str] =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
SCREAMING_SNAKE_CASE__ : Any =score.BleurtScorer(os.path.join(__lowercase , __lowercase ) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.scorer.score(references=__lowercase , candidates=__lowercase )
return {"scores": scores} | 152 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {"""vocab_file""": """vocab.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
UpperCamelCase : Any = {"""mgp-str""": 27}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : Optional[int]="[s]" , UpperCAmelCase_ : Dict="[GO]" , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
super().__init__(
unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
a : Optional[int] = json.load(UpperCAmelCase_)
a : Any = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return len(self.vocab)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : List[str] = []
for s in text:
char_tokens.extend(UpperCAmelCase_)
return char_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return self.vocab.get(UpperCAmelCase_ , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase_))
return
a : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
return (vocab_file,)
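# A toy, self-contained sketch of the character-level scheme implemented above
# (the vocab below is made up for illustration; real vocabs come from vocab.json):
toy_vocab = {"[GO]": 0, "h": 1, "i": 2}
toy_tokens = list("hi!")  # each character becomes its own token
toy_ids = [toy_vocab.get(c, toy_vocab["[GO]"]) for c in toy_tokens]  # unknown chars map to "[GO]"
assert toy_ids == [1, 2, 0]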
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
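# Pure-Python sketch of the placeholder expansion performed by
# `replace_placeholder_tokens_in_text` above (the token map is hypothetical):
_token_map = {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}
_text = "a photo of <cat-toy>"
for _placeholder, _tokens in _token_map.items():
    if _placeholder in _text:
        _tokens = _tokens[: 1 + int(len(_tokens) * 1.0)]  # prop_tokens_to_load = 1.0 keeps every vector
        _text = _text.replace(_placeholder, " ".join(_tokens))
assert _text == "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"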
| 345 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Dict = logging.get_logger(__name__)
UpperCamelCase__: Optional[Any] = ["model.decoder.embed_positions.weights"]
def snake_case_ ( _lowerCAmelCase : str ) -> Tuple:
if "emb" in name:
UpperCAmelCase : int = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
UpperCAmelCase : Optional[Any] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
UpperCAmelCase : List[str] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
UpperCAmelCase : Any = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
UpperCAmelCase : str = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
UpperCAmelCase : str = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
UpperCAmelCase : Optional[int] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
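# Example key mappings produced above (hedged: assumes each `replace` call rebinds
# `name`, as in the un-obfuscated original script):
#   "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"
#   "emb.0.weight"                        -> "model.decoder.embed_tokens.0.weight"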
def snake_case_ ( _lowerCAmelCase : OrderedDict , _lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
UpperCAmelCase : int = list(state_dict.keys() )
UpperCAmelCase : Tuple = {}
for key in keys:
UpperCAmelCase : Optional[int] = state_dict.pop(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = rename_keys(_lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Union[str, Any] = val[:hidden_size, :]
UpperCAmelCase : str = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : Tuple = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
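# Toy demonstration of the fused-qkv split performed above: one `in_proj_weight`
# of shape (3 * hidden, hidden) is sliced into equal q/k/v blocks (the sizes are
# illustrative, not the real checkpoint dimensions).
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : 2 * _hidden, :], _fused[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)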
def snake_case_ ( _lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCAmelCase : Optional[int] = 1024
UpperCAmelCase : List[Any] = 24
UpperCAmelCase : List[Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : Tuple = 1536
UpperCAmelCase : Union[str, Any] = 48
UpperCAmelCase : Tuple = 24
elif checkpoint == "large":
UpperCAmelCase : Tuple = 2048
UpperCAmelCase : List[Any] = 48
UpperCAmelCase : List[Any] = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
UpperCAmelCase : List[Any] = MusicgenDecoderConfig(
hidden_size=_lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , )
return config
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Dict="cpu" ) -> str:
UpperCAmelCase : List[str] = MusicGen.get_pretrained(_lowerCAmelCase , device=_lowerCAmelCase )
UpperCAmelCase : Dict = decoder_config_from_checkpoint(_lowerCAmelCase )
UpperCAmelCase : Any = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_state_dict(
_lowerCAmelCase , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained('''t5-base''' )
UpperCAmelCase : Dict = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
UpperCAmelCase : Optional[int] = MusicgenForCausalLM(_lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
UpperCAmelCase : Any = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase , audio_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : List[str] = model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''t5-base''' )
UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
UpperCAmelCase : int = MusicgenProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# set the appropriate bos/pad token ids
UpperCAmelCase : Optional[int] = 2048
UpperCAmelCase : List[Any] = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = 3.0
if pytorch_dump_folder is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_lowerCAmelCase )
processor.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
UpperCamelCase__: Optional[int] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 23 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
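# Minimal sketch of wrapping a frozen linear layer with the LoRA adapter above
# (hedged: assumes the adapter's original signature `LoRALayer(module, rank)`,
# matching how it is used further down in this file):
#
#   base = nn.Linear(8, 8)
#   base.weight.requires_grad_(False)
#   lora = LoRALayer(base, rank=4)
#   out = lora(torch.randn(2, 8))  # base(x) + low-rank adapter(x)
#   assert out.shape == (2, 8)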
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b7 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
lowercase__ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
lowercase__ = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time in seconds allowed for each candidate program (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
lowercase__ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
lowercase__ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def A_ ( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def A_ ( self , lowercase , lowercase , lowercase=[1, 10, 100] , lowercase=4 , lowercase=3.0 ):
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowercase ) as executor:
_lowerCamelCase : List[str] = []
_lowerCamelCase : List[Any] = Counter()
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Optional[int] = defaultdict(lowercase )
for task_id, (candidates, test_case) in enumerate(zip(lowercase , lowercase ) ):
for candidate in candidates:
_lowerCamelCase : List[Any] = candidate + '\n' + test_case
_lowerCamelCase : Optional[int] = (test_program, timeout, task_id, completion_id[task_id])
_lowerCamelCase : Tuple = executor.submit(lowercase , *lowercase )
futures.append(lowercase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowercase ):
_lowerCamelCase : Optional[Any] = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
_lowerCamelCase, _lowerCamelCase : Dict = [], []
for result in results.values():
result.sort()
_lowerCamelCase : Dict = [r[1]['passed'] for r in result]
total.append(len(lowercase ) )
correct.append(sum(lowercase ) )
_lowerCamelCase : Optional[int] = np.array(lowercase )
_lowerCamelCase : Tuple = np.array(lowercase )
_lowerCamelCase : Optional[int] = k
_lowerCamelCase : int = {F'''pass@{k}''': estimate_pass_at_k(lowercase , lowercase , lowercase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
def estimator(lowercase__ , lowercase__ , lowercase__ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowercase__ , lowercase__ ):
_lowerCamelCase : List[str] = itertools.repeat(lowercase__ , len(lowercase__ ) )
else:
assert len(lowercase__ ) == len(lowercase__ )
_lowerCamelCase : str = iter(lowercase__ )
return np.array([estimator(int(lowercase__ ) , int(lowercase__ ) , lowercase__ ) for n, c in zip(lowercase__ , lowercase__ )] ) | 12 |
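# Worked pass@k example for the unbiased estimator above: with n = 2 samples of
# which c = 1 passes, pass@1 = 1 - (1 - 1/2) = 0.5 and pass@2 = 1.0, because
# n - c < k guarantees at least one passing sample in any draw of k.
import numpy as _np

def _pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return float(1.0 - _np.prod(1.0 - k / _np.arange(n - c + 1, n + 1)))

assert _pass_at_k(2, 1, 1) == 0.5
assert _pass_at_k(2, 1, 2) == 1.0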
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i + coll_min the collection contains
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
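# Worked trace (hedged: assumes the obfuscated assignment above writes into
# `ordered[counting_arr[collection[i] - coll_min] - 1]`, as in the standard
# algorithm): counting_sort([5, 3, 1, 3]) builds counting_arr = [1, 0, 2, 0, 1]
# over values 1..5, prefix-sums it to [1, 1, 3, 3, 4], then places elements from
# the end, yielding [1, 3, 3, 5].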
def _snake_case ( lowercase__ ):
return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 1 |
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ , snake_case_ = text, pattern
snake_case_ , snake_case_ = len(lowerCAmelCase__), len(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> int:
for i in range(self.patLen - 1, -1, -1):
if char == self.pattern[i]:
return i
return -1
def a_ ( self, lowerCAmelCase__) -> int:
for i in range(self.patLen - 1, -1, -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def a_ ( self) -> list[int]:
# searches pattern in text and returns index positions
snake_case_ = []
for i in range(self.textLen - self.patLen + 1):
snake_case_ = self.mismatch_in_text(lowerCAmelCase__)
if mismatch_index == -1:
positions.append(lowerCAmelCase__)
else:
snake_case_ = self.match_in_pattern(self.text[mismatch_index])
snake_case_ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCamelCase = '''ABAABA'''
__UpperCamelCase = '''AB'''
__UpperCamelCase = BoyerMooreSearch(text, pattern)
__UpperCamelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
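# For the driver above (text "ABAABA", pattern "AB"), the pattern matches at
# indices 0 and 3, so the script prints:
#   Pattern found in following positions:
#   [0, 3]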
| 69 | """simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
while a != 0:
snake_case_ , snake_case_ = b % a, a
return b
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
if gcd(UpperCAmelCase , UpperCAmelCase ) != 1:
snake_case_ = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = 1, 0, a
snake_case_ , snake_case_ , snake_case_ = 0, 1, m
while va != 0:
snake_case_ = ua // va
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
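# Worked example for the two routines above (hedged: assumes they are `gcd` and
# `mod_inverse`, as in the un-obfuscated original): gcd(3, 7) == 1, and the
# inverse of 3 modulo 7 is 5, since 3 * 5 = 15 ≡ 1 (mod 7).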
| 69 | 1 |
def __a ( lowerCAmelCase_ : int = 2_00_00_00 ) -> int:
'''simple docstring'''
UpperCAmelCase_= [0 for i in range(n + 1 )]
UpperCAmelCase_= 1
UpperCAmelCase_= 1
for i in range(2 ,int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i ,n + 1 ,lowerCAmelCase_ ):
UpperCAmelCase_= 1
UpperCAmelCase_= 0
for i in range(lowerCAmelCase_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
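# Sanity check (hedged: assumes the two bare `= 1` assignments above stand for
# `primality_list[0] = 1` and `primality_list[1] = 1`, and that the running total
# is accumulated in `sum_of_primes`, as in a standard sieve): the primes below 10
# are 2, 3, 5 and 7, so solution(10) would return 17.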
if __name__ == "__main__":
print(f'{solution() = }')
| 366 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __a ( ) -> str:
'''simple docstring'''
UpperCAmelCase_= torch.nn.Linear(2 ,4 )
UpperCAmelCase_= torch.optim.AdamW(model.parameters() ,lr=1.0 )
UpperCAmelCase_= torch.optim.lr_scheduler.OneCycleLR(lowerCAmelCase_ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __a ( lowerCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __a ( lowerCAmelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowerCAmelCase_ )
class lowercase ( snake_case__):
"""simple docstring"""
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_= GradientState()
assert state.num_steps == 1
UpperCAmelCase_= 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_= False
assert state.sync_gradients is False
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
)= accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Dict , **__UpperCAmelCase : Tuple ):
pass
with patch("""torch.cuda.set_device""" , __UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
UpperCAmelCase_= Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
# saving hook
def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ):
UpperCAmelCase_= {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# loading hook
def load_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """r""" ) as f:
UpperCAmelCase_= json.load(__UpperCAmelCase )
UpperCAmelCase_= config["""class_name"""]
UpperCAmelCase_= accelerator.register_save_state_pre_hook(__UpperCAmelCase )
UpperCAmelCase_= accelerator.register_load_state_pre_hook(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= None
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(dummy_obj is None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= [1, 2, 3]
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map={"""""": 0} , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= Accelerator()
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase )
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_= torch.nn.Linear(10 , 10 )
UpperCAmelCase_= torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
| 277 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
__lowercase : int = 3_00 # TEMPERATURE (unit = K)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , ):
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
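# Hedged worked example (typical silicon values assumed, not from the source):
# with donor_conc = acceptor_conc = 1e17 and intrinsic_conc = 1.5e10, the
# built-in voltage is kT/q * ln(1e34 / 2.25e20) ~= 0.0259 * 31.4 ~= 0.81 V.
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))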
| 27 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
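# A small sketch of the shape arithmetic asserted by the model tester above:
# a Swin-style hierarchy merges patches 2x2 at each of the len(depths)-1 stage
# transitions, so the sequence length shrinks by 4 per stage while the channel
# dim doubles. Values below are the tester's own defaults (image_size=32,
# patch_size=2, embed_dim=16, depths=[1, 2, 1]).
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2                 # 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))    # 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))        # 64
print(expected_seq_len, expected_dim)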
| 16 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = ['''input_ids''', '''attention_mask''']
__lowercase : Dict = None
def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,**__UpperCAmelCase ,) -> List[str]:
super().__init__(
__UpperCAmelCase ,__UpperCAmelCase ,tokenizer_file=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,add_prefix_space=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,**__UpperCAmelCase ,)
lowerCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,__UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ : Dict = getattr(__UpperCAmelCase ,pre_tok_state.pop("""type""" ) )
lowerCAmelCase__ : Optional[int] = add_prefix_space
lowerCAmelCase__ : Any = pre_tok_class(**__UpperCAmelCase )
lowerCAmelCase__ : int = add_prefix_space
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> BatchEncoding:
lowerCAmelCase__ : Optional[Any] = kwargs.get("""is_split_into_words""" ,__UpperCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> BatchEncoding:
lowerCAmelCase__ : Dict = kwargs.get("""is_split_into_words""" ,__UpperCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
lowerCAmelCase__ : int = self._tokenizer.model.save(__UpperCAmelCase ,name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
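# Toy illustration (hypothetical ids, not real vocabulary entries) of the
# conversation encoding above: each turn's ids are followed by EOS, then only
# the last model_max_length ids are kept.
eos_token_id, model_max_length = 2, 8
turns = [[5, 6, 7], [8, 9], [10, 11, 12]]
input_ids = []
for ids in turns:
    input_ids.extend(ids + [eos_token_id])
input_ids = input_ids[-model_max_length:]
print(input_ids)  # [2, 8, 9, 2, 10, 11, 12, 2]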
| 184 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = IFInpaintingSuperResolutionPipeline
__lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
__lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase_ ( self ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase_ ( self ) -> List[Any]:
self._test_save_load_local()
def UpperCAmelCase_ ( self ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
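# Minimal sketch of the device-aware seeding used in get_dummy_inputs above:
# MPS does not support seeding a device-specific torch.Generator, so tests
# fall back to the global torch.manual_seed there (which returns the default
# CPU generator).
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)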
| 184 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCamelCase__ : List[str] = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
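# A rough standalone sketch of the lazy-module pattern used above (the real
# transformers._LazyModule is more involved): attribute access triggers the
# actual submodule import, so heavy dependencies load only when needed.
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, item)
        raise AttributeError(item)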
| 246 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
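# Hypothetical invocation (script name and paths assumed, not from the source):
# python convert_ldm.py --checkpoint_path model.ckpt --config_path ldm_config.yaml --output_path ./ldm_pipeline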
| 246 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class _a ( _lowercase):
_a : Union[str, Any] = VOCAB_FILES_NAMES
_a : Tuple = PRETRAINED_VOCAB_FILES_MAP
_a : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Dict = ['''input_ids''', '''attention_mask''']
_a : str = RobertaTokenizer
def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : List[Any]="replace" , _SCREAMING_SNAKE_CASE : Union[str, Any]="<s>" , _SCREAMING_SNAKE_CASE : int="</s>" , _SCREAMING_SNAKE_CASE : Any="</s>" , _SCREAMING_SNAKE_CASE : str="<s>" , _SCREAMING_SNAKE_CASE : Any="<unk>" , _SCREAMING_SNAKE_CASE : Dict="<pad>" , _SCREAMING_SNAKE_CASE : Any="<mask>" , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , **_SCREAMING_SNAKE_CASE : Any , )-> Any:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowerCAmelCase__ : int = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ : List[str] = add_prefix_space
lowerCAmelCase__ : Union[str, Any] = pre_tok_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = add_prefix_space
lowerCAmelCase__ : Optional[int] = '''post_processor'''
lowerCAmelCase__ : Dict = getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
lowerCAmelCase__ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase__ : Any = tuple(state['''cls'''] )
lowerCAmelCase__ : List[str] = False
if state.get('''add_prefix_space''' , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowerCAmelCase__ : Dict = add_prefix_space
lowerCAmelCase__ : int = True
if state.get('''trim_offsets''' , _SCREAMING_SNAKE_CASE ) != trim_offsets:
lowerCAmelCase__ : Any = trim_offsets
lowerCAmelCase__ : int = True
if changes_to_apply:
lowerCAmelCase__ : Any = getattr(_SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
lowerCAmelCase__ : Union[str, Any] = component_class(**_SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__( self : List[Any] )-> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict )-> Tuple:
lowerCAmelCase__ : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value
lowerCAmelCase__ : Any = value
def UpperCAmelCase__( self : Any , *_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : Optional[int] )-> BatchEncoding:
lowerCAmelCase__ : Dict = kwargs.get('''is_split_into_words''' , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] , *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Union[str, Any] )-> BatchEncoding:
lowerCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
lowerCAmelCase__ : Optional[Any] = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
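# Hedged illustration (toy ids, not real vocabulary entries) of the RoBERTa
# pair layout built above: <s> A </s></s> B </s>, with token type ids all zero.
bos_token_id, eos_token_id = 0, 2
token_ids_0, token_ids_1 = [100, 101], [200]
pair = [bos_token_id] + token_ids_0 + [eos_token_id] + [eos_token_id] + token_ids_1 + [eos_token_id]
print(pair)             # [0, 100, 101, 2, 2, 200, 2]
print([0] * len(pair))  # RoBERTa does not use segment ids, so all zeros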
| 211 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 211 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCamelCase : Optional[int] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowerCAmelCase , required=lowerCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowerCAmelCase , required=lowerCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowerCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=lowerCAmelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowerCAmelCase , required=lowerCAmelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowerCAmelCase , type=lowerCAmelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowerCAmelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowerCAmelCase , required=lowerCAmelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowerCAmelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowerCAmelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowerCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowerCAmelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowerCAmelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowerCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowerCAmelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowerCAmelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowerCAmelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowerCAmelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowerCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowerCAmelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=lowerCAmelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowerCAmelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase , default=5_0 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowerCAmelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowerCAmelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=lowerCAmelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=lowerCAmelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowerCAmelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowerCAmelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowerCAmelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowerCAmelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowerCAmelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowerCAmelCase , default=5_6 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowerCAmelCase , default=5_0_0 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowerCAmelCase , default=4_0_0_0 , help="Checkpoint interval." )
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
sanity_checks(lowerCAmelCase )
# ARGS #
init_gpu_params(lowerCAmelCase )
set_seed(lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(lowerCAmelCase ) , lowerCAmelCase , indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE_ : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.all_special_tokens.index(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.all_special_ids[idx]
logger.info(f'Special tokens {special_tok_ids}' )
SCREAMING_SNAKE_CASE_ : Tuple = special_tok_ids
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
with open(args.data_file , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ : Optional[int] = pickle.load(lowerCAmelCase )
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ : str = pickle.load(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = np.maximum(lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE_ : str = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : List[str] = LmSeqsDataset(params=lowerCAmelCase , data=lowerCAmelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE_ : List[Any] = True
if args.student_pretrained_weights is not None:
logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : List[str] = student_model_class(lowerCAmelCase )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
SCREAMING_SNAKE_CASE_ : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase , lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase , lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : int = Distiller(
params=lowerCAmelCase , dataset=lowerCAmelCase , token_probs=lowerCAmelCase , student=lowerCAmelCase , teacher=lowerCAmelCase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
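# Hedged sketch of the temperature-scaled distillation CE term that the
# --alpha_ce and --temperature arguments above control (a toy version, not the
# Distiller implementation itself):
import torch.nn.functional as F

def kd_ce_loss(student_logits, teacher_logits, temperature=2.0):
    loss = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    )
    return loss * temperature**2  # rescale so gradient magnitudes match the hard-label loss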
| 18 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
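# Worked example: for L = 10 H and C = 5 F,
# f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*sqrt(50)) ~= 0.0225 Hz.
print(resonant_frequency(10, 5))  # ('Resonant frequency', 0.0225...)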
| 18 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup

def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 362 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=sys.maxsize ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''bilinear'''
__lowercase = max_size
__lowercase = short_edge_length
def __call__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = []
for img in imgs:
__lowercase , __lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowercase = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowercase = size * 1.0 / min(_lowerCamelCase ,_lowerCamelCase )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
if max(_lowerCamelCase ,_lowerCamelCase ) > self.max_size:
__lowercase = self.max_size * 1.0 / max(_lowerCamelCase ,_lowerCamelCase )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase = int(neww + 0.5 )
__lowercase = int(newh + 0.5 )
            if img.dtype == np.uint8:
__lowercase = Image.fromarray(_lowerCamelCase )
__lowercase = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
__lowercase = np.asarray(_lowerCamelCase )
else:
__lowercase = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__lowercase = nn.functional.interpolate(
_lowerCamelCase ,(newh, neww) ,mode=self.interp_method ,align_corners=_lowerCamelCase ).squeeze(0 )
img_augs.append(_lowerCamelCase )
return img_augs
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
__lowercase = cfg.INPUT.FORMAT
__lowercase = cfg.SIZE_DIVISIBILITY
__lowercase = cfg.PAD_VALUE
__lowercase = cfg.INPUT.MAX_SIZE_TEST
__lowercase = cfg.MODEL.DEVICE
__lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = lambda _lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = tuple(max(_lowerCamelCase ) for s in zip(*[img.shape for img in images] ) )
__lowercase = [im.shape[-2:] for im in images]
__lowercase = [
nn.functional.pad(
_lowerCamelCase ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_lowerCamelCase ,_lowerCamelCase )
]
return torch.stack(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [images]
if single_image:
assert len(_lowerCamelCase ) == 1
for i in range(len(_lowerCamelCase ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_lowerCamelCase ,images.pop(_lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_lowerCamelCase ,torch.as_tensor(img_tensorize(images.pop(_lowerCamelCase ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
__lowercase = torch.tensor([im.shape[:2] for im in images] )
__lowercase = self.aug(_lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__lowercase = [self.normalizer(_lowerCamelCase ) for x in images]
# now pad them to do the following operations
__lowercase , __lowercase = self.pad(_lowerCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowercase = torch.true_divide(_lowerCamelCase ,_lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Tuple[int, int] ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__lowercase , __lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
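# Hedged sketch of the shortest-edge resize arithmetic in ResizeShortestEdge
# above: scale the short side to `size`, then shrink again if the long side
# would exceed `max_size` (the helper name and sample values are assumptions).
def target_hw(h: int, w: int, size: int, max_size: int):
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(target_hw(480, 640, 800, 1333))  # (800, 1067)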
| 217 | 0 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
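# Worked example: twos_complement(-5) -> '0b1011'.
# bin(-5)[3:] is '101' (3 bits); abs(-5) - 2**3 = -3 and bin(-3)[3:] = '11';
# prefixing '1' and zero-padding to 4 bits gives '1011'.
print(twos_complement(-5))  # 0b1011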
| 327 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: List[Any] = VOCAB_FILES_NAMES
__magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__: List[str] = PRETRAINED_INIT_CONFIGURATION
__magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__: Union[str, Any] = LxmertTokenizer
def __init__( self : List[str] , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=True , _A : Dict="[UNK]" , _A : Optional[int]="[SEP]" , _A : Dict="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : str="[MASK]" , _A : Tuple=True , _A : Dict=None , **_A : List[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _A ) != do_lower_case
or normalizer_state.get('strip_accents' , _A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars
):
snake_case_ : Tuple = getattr(_A , normalizer_state.pop('type' ) )
snake_case_ : Union[str, Any] = do_lower_case
snake_case_ : int = strip_accents
snake_case_ : Optional[Any] = tokenize_chinese_chars
snake_case_ : List[Any] = normalizer_class(**_A )
snake_case_ : Tuple = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def UpperCAmelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
snake_case_ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
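# Hedged illustration (toy ids, not real vocabulary entries) of the BERT-style
# pair layout built above: [CLS] A [SEP] B [SEP], with type ids 0 over the
# first segment (CLS..SEP inclusive) and 1 over the second.
cls_token_id, sep_token_id = 101, 102
token_ids_0, token_ids_1 = [7, 8], [9]
ids = [cls_token_id] + token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]
type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
print(ids)       # [101, 7, 8, 102, 9, 102]
print(type_ids)  # [0, 0, 0, 0, 1, 1]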
| 327 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : str ="▁"
__lowerCAmelCase : Optional[int] ={"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/xglm-564M": 2048,
}
class UpperCAmelCase(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 123 |
'''simple docstring'''
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 123 | 1 |
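A quick standard-library cross-check of the calendar arithmetic above (Project Euler 19); it should agree with solution() and print 171:

from datetime import date

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday is 0, so 6 is Sunday
)
print(count)  # 171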
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 188 |
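Classically, the circuit above is multiplication by a DFT matrix; a small numpy sketch of that correspondence (my own helper, not part of the rows). Applied to |000>, the QFT yields a uniform superposition, i.e. probability 0.125 for each of the 8 outcomes:

import numpy as np

def qft_matrix(n_qubits: int) -> np.ndarray:
    n = 2 ** n_qubits
    omega = np.exp(2j * np.pi / n)
    return np.array([[omega ** (j * k) for k in range(n)] for j in range(n)]) / np.sqrt(n)

state = np.zeros(8)
state[0] = 1.0  # |000>
amplitudes = qft_matrix(3) @ state
print(np.round(np.abs(amplitudes) ** 2, 3))  # eight entries of 0.125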
from math import factorial
def solution(num: int = 100):
    '''simple docstring'''
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 188 | 1 |
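Sanity check for the digit-sum row above (Project Euler 20): the digit sum of 100! is known to be 648.

from math import factorial

assert sum(int(digit) for digit in str(factorial(100))) == 648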
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 352 |
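A reduced sketch of the detection idea in the script above: a config attribute counts as "used" if the modeling source accesses it directly or via getattr, possibly across line breaks. The regex here is simplified relative to the script's:

import re

def attribute_used(attribute: str, source: str) -> bool:
    if f"config.{attribute}" in source:  # direct `config.xxx` access
        return True
    # getattr(config, "xxx", ...) calls, possibly split across lines
    return re.search(rf"getattr\s*\(\s*(self\.)?config,\s*\"{attribute}\"", source) is not None

src = 'hidden = getattr(config,\n    "hidden_size", 768)'
print(attribute_used("hidden_size", src))  # True
print(attribute_used("dropout", src))      # False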
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _lowerCamelCase(Dataset):
    def __init__(self, path="", prefix="train"):
        """simple docstring"""
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
def __len__( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.documents )
    def __getitem__(self, idx):
        """simple docstring"""
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 212 | 0 |
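A toy run of the padding-and-masking scheme above, condensed into a few lines (values are illustrative):

import torch

PAD = 0

def pad_or_truncate(sequence, block_size, pad_token_id):
    # truncate long sequences, right-pad short ones (as fit_to_block_size does)
    return sequence[:block_size] + [pad_token_id] * max(0, block_size - len(sequence))

seq = torch.tensor(pad_or_truncate([5, 6, 7], 5, PAD))
mask = torch.ones_like(seq)
mask[seq == PAD] = 0  # zero out pad positions, as build_mask does
print(seq.tolist(), mask.tolist())  # [5, 6, 7, 0, 0] [1, 1, 1, 0, 0]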
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
lowercase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class __snake_case(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Keep the fast tokenizer's normalizer in sync with the kwargs actually passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 20 |
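The __init__ above re-syncs the backend normalizer whenever its serialized state disagrees with the kwargs. A sketch of that round-trip, assuming the tokenizers package; the state keys shown are the ones the class already relies on:

import json
from tokenizers import normalizers

state = json.loads(normalizers.BertNormalizer(lowercase=True).__getstate__())
print(state["type"], state["lowercase"])  # BertNormalizer True

# rebuild with one option flipped, mirroring the __init__ logic above
normalizer_class = getattr(normalizers, state.pop("type"))
rebuilt = normalizer_class(**{**state, "lowercase": False})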
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase : str = list(range(2 , n + 1 ) )
lowercase : Tuple = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase : Tuple = 0
# filters actual prime numbers.
lowercase : int = [x for x in begin_list if x != 0]
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type list"
return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Tuple = 0
# prime factorization of 'number'
lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Union[str, Any] = 0
# prime factorization of 'number'
lowercase : Tuple = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , SCREAMING_SNAKE_CASE__ ), "compare bust been from type bool"
return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase : Optional[Any] = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
elif numbera == 1 or numbera == 1:
lowercase : Union[str, Any] = []
lowercase : List[str] = []
lowercase : Dict = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = 0
lowercase : Optional[Any] = 0
lowercase : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase : Dict = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
ans *= n
else:
lowercase : List[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
ans *= n
done.append(SCREAMING_SNAKE_CASE__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase : Optional[int] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
ans *= n
done.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 0), "'number' must been a positive int"
lowercase : Dict = 0
lowercase : List[str] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
ans += 1
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and is_prime(
SCREAMING_SNAKE_CASE__ ), "'ans' must been a prime number and from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert (
is_prime(SCREAMING_SNAKE_CASE__ ) and is_prime(SCREAMING_SNAKE_CASE__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase : List[str] = p_number_a + 1 # jump to the next number
lowercase : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
while number < p_number_a:
ans.append(SCREAMING_SNAKE_CASE__ )
number += 1
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and ans[0] != p_number_a
and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase : Tuple = gcd(abs(SCREAMING_SNAKE_CASE__ ) , abs(SCREAMING_SNAKE_CASE__ ) )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 20 | 1 |
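Quick spot checks of the number-theory helpers above, re-expressed minimally (names are mine) so they run standalone:

from math import isqrt

def is_prime(n: int) -> bool:
    if n <= 1:
        return False
    return all(n % d for d in range(2, isqrt(n) + 1))

def goldbach_pair(n: int):
    # first pair of primes summing to an even n > 2, as in the row above
    for p in range(2, n):
        if is_prime(p) and is_prime(n - p):
            return [p, n - p]

print(goldbach_pair(28))  # [5, 23]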
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' )
return image
def A ( lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def A ( lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = dct.pop(lowercase )
UpperCamelCase = val
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCamelCase = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
UpperCamelCase = qkv_bias
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 364 if 'coco' in model_name else 224
UpperCamelCase = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
UpperCamelCase = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def A ( lowercase , lowercase=None , lowercase=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCamelCase = tokenizer('\n' , add_special_tokens=lowercase ).input_ids[0]
UpperCamelCase , UpperCamelCase = get_blipa_config(lowercase , eos_token_id=lowercase )
UpperCamelCase = BlipaForConditionalGeneration(lowercase ).eval()
UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase , UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase , UpperCamelCase , UpperCamelCase = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCamelCase = original_model.state_dict()
UpperCamelCase = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase = state_dict.pop(lowercase )
if key.startswith('Qformer.bert' ):
UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCamelCase = key.replace('t5' , 'language' )
UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
UpperCamelCase , UpperCamelCase = hf_model.load_state_dict(lowercase , strict=lowercase )
assert len(lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase = load_demo_image()
UpperCamelCase = vis_processors['eval'](lowercase ).unsqueeze(0 ).to(lowercase )
UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowercase )
# create processor
UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowercase , image_std=lowercase )
UpperCamelCase = BlipaProcessor(image_processor=lowercase , tokenizer=lowercase )
UpperCamelCase = processor(images=lowercase , return_tensors='pt' ).pixel_values.to(lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCamelCase = hf_model(lowercase , lowercase ).logits
else:
UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase = hf_model(lowercase , lowercase , labels=lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase = torch.tensor(
[[-41.5_850, -4.4_4_4_0, -8.9_9_2_2], [-47.4_322, -5.9_1_4_3, -1.7_3_4_0]] , device=lowercase )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase = torch.tensor(
[[-57.0_109, -9.8_9_6_7, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=lowercase )
else:
# cast to same type
UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(lowercase ) , lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCamelCase = ''
UpperCamelCase = tokenizer(lowercase , return_tensors='pt' ).input_ids.to(lowercase )
UpperCamelCase = original_model.generate({'image': original_pixel_values} )
UpperCamelCase = hf_model.generate(
lowercase , lowercase , do_sample=lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowercase )
UpperCamelCase = input_ids.shape[1]
UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase )
UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
_UpperCAmelCase : str = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 367 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCamelCase = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' )
return image
def A ( lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def A ( lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = dct.pop(lowercase )
UpperCamelCase = val
def A ( lowercase , lowercase ) -> List[str]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCamelCase = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
UpperCamelCase = qkv_bias
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 364 if 'coco' in model_name else 224
UpperCamelCase = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
UpperCamelCase = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def A ( lowercase , lowercase=None , lowercase=False ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCamelCase = tokenizer('\n' , add_special_tokens=lowercase ).input_ids[0]
UpperCamelCase , UpperCamelCase = get_blipa_config(lowercase , eos_token_id=lowercase )
UpperCamelCase = BlipaForConditionalGeneration(lowercase ).eval()
UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase , UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase , UpperCamelCase , UpperCamelCase = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCamelCase = original_model.state_dict()
UpperCamelCase = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase = state_dict.pop(lowercase )
if key.startswith('Qformer.bert' ):
UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCamelCase = key.replace('t5' , 'language' )
UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
UpperCamelCase , UpperCamelCase = hf_model.load_state_dict(lowercase , strict=lowercase )
assert len(lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase = load_demo_image()
UpperCamelCase = vis_processors['eval'](lowercase ).unsqueeze(0 ).to(lowercase )
UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowercase )
# create processor
UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowercase , image_std=lowercase )
UpperCamelCase = BlipaProcessor(image_processor=lowercase , tokenizer=lowercase )
UpperCamelCase = processor(images=lowercase , return_tensors='pt' ).pixel_values.to(lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCamelCase = hf_model(lowercase , lowercase ).logits
else:
UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase = hf_model(lowercase , lowercase , labels=lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowercase )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowercase )
else:
# cast to same type
UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(lowercase ) , lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCamelCase = ''
UpperCamelCase = tokenizer(lowercase , return_tensors='pt' ).input_ids.to(lowercase )
UpperCamelCase = original_model.generate({'image': original_pixel_values} )
UpperCamelCase = hf_model.generate(
lowercase , lowercase , do_sample=lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowercase )
UpperCamelCase = input_ids.shape[1]
UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase )
UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
_UpperCAmelCase : str = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
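# A minimal reload sketch, separate from the conversion flow above; the local
# folder path is hypothetical, and Blip2Processor / Blip2ForConditionalGeneration
# are the public classes a converted checkpoint is meant to round-trip through.
def load_converted_blip2(folder="./blip2-opt-2.7b"):
    from transformers import Blip2ForConditionalGeneration, Blip2Processor

    processor = Blip2Processor.from_pretrained(folder)
    model = Blip2ForConditionalGeneration.from_pretrained(folder)
    return processor, model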
| 110 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """deberta-v2"""
def __init__( self : Union[str, Any] , UpperCamelCase__ : int=12_8100 , UpperCamelCase__ : Union[str, Any]=1536 , UpperCamelCase__ : Any=24 , UpperCamelCase__ : int=24 , UpperCamelCase__ : List[Any]=6144 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : str=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : List[Any]="gelu" , **UpperCamelCase__ : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = initializer_range
__magic_name__ = relative_attention
__magic_name__ = max_relative_positions
__magic_name__ = pad_token_id
__magic_name__ = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
__magic_name__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]
__magic_name__ = pos_att_type
__magic_name__ = vocab_size
__magic_name__ = layer_norm_eps
__magic_name__ = kwargs.get("""pooler_hidden_size""" , UpperCamelCase__ )
__magic_name__ = pooler_dropout
__magic_name__ = pooler_hidden_act
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
@property
def _lowercase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__magic_name__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
return 12
def _lowercase ( self : str , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__magic_name__ = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
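# A minimal export sketch, assuming the ONNX config class defined above is
# DebertaV2OnnxConfig (its original, un-obfuscated name); substitute the local
# class name if it differs. transformers.onnx.export drives the actual tracing.
def export_deberta_v2_onnx(output_path="deberta_v2.onnx"):
    from pathlib import Path

    from transformers import AutoModel, AutoTokenizer
    from transformers.onnx import export

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = AutoModel.from_pretrained("microsoft/deberta-v2-xlarge")
    onnx_config = DebertaV2OnnxConfig(model.config)  # the OnnxConfig subclass above
    return export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path(output_path))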
| 88 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = 42
@flax_register_to_config
class A_ ( nn.Module , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase__ = 32
lowerCAmelCase__ = 4
lowerCAmelCase__ = 4
lowerCAmelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCAmelCase__ = False
lowerCAmelCase__ = (320, 640, 1280, 1280)
lowerCAmelCase__ = 2
lowerCAmelCase__ = 8
lowerCAmelCase__ = None
lowerCAmelCase__ = 1280
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = False
lowerCAmelCase__ = jnp.floataa
lowerCAmelCase__ = True
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :jax.random.KeyArray )-> FrozenDict:
# init input tensors
__A = (1, self.in_channels, self.sample_size, self.sample_size)
__A = jnp.zeros(_UpperCamelCase , dtype=jnp.floataa )
__A = jnp.ones((1,) , dtype=jnp.intaa )
__A = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__A , __A = jax.random.split(_UpperCamelCase )
__A = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )["params"]
def _lowerCAmelCase (self :Tuple )-> Optional[int]:
__A = self.block_out_channels
__A = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__A = self.num_attention_heads or self.attention_head_dim
# input
__A = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__A = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__A = FlaxTimestepEmbedding(_UpperCamelCase , dtype=self.dtype )
__A = self.only_cross_attention
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__A = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__A = (num_attention_heads,) * len(self.down_block_types )
# down
__A = []
__A = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__A = output_channel
__A = block_out_channels[i]
__A = i == len(_UpperCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__A = FlaxCrossAttnDownBlockaD(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__A = FlaxDownBlockaD(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_UpperCamelCase )
__A = down_blocks
# mid
__A = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__A = []
__A = list(reversed(_UpperCamelCase ) )
__A = list(reversed(_UpperCamelCase ) )
__A = list(reversed(_UpperCamelCase ) )
__A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__A = output_channel
__A = reversed_block_out_channels[i]
__A = reversed_block_out_channels[min(i + 1 , len(_UpperCamelCase ) - 1 )]
__A = i == len(_UpperCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__A = FlaxCrossAttnUpBlockaD(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , prev_output_channel=_UpperCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__A = FlaxUpBlockaD(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , prev_output_channel=_UpperCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_UpperCamelCase )
__A = output_channel
__A = up_blocks
# out
__A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self :Union[str, Any] , _UpperCamelCase :Any , _UpperCamelCase :List[str] , _UpperCamelCase :str , _UpperCamelCase :int=None , _UpperCamelCase :Optional[int]=None , _UpperCamelCase :bool = True , _UpperCamelCase :bool = False , )-> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_UpperCamelCase , jnp.ndarray ):
__A = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__A = timesteps.astype(dtype=jnp.floataa )
__A = jnp.expand_dims(_UpperCamelCase , 0 )
__A = self.time_proj(_UpperCamelCase )
__A = self.time_embedding(_UpperCamelCase )
# 2. pre-process
__A = jnp.transpose(_UpperCamelCase , (0, 2, 3, 1) )
__A = self.conv_in(_UpperCamelCase )
# 3. down
__A = (sample,)
for down_block in self.down_blocks:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__A , __A = down_block(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , deterministic=not train )
else:
__A , __A = down_block(_UpperCamelCase , _UpperCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__A = ()
for down_block_res_sample, down_block_additional_residual in zip(
_UpperCamelCase , _UpperCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__A = new_down_block_res_samples
# 4. mid
__A = self.mid_block(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__A = down_block_res_samples[-(self.layers_per_block + 1) :]
__A = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__A = up_block(
_UpperCamelCase , temb=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , res_hidden_states_tuple=_UpperCamelCase , deterministic=not train , )
else:
__A = up_block(_UpperCamelCase , temb=_UpperCamelCase , res_hidden_states_tuple=_UpperCamelCase , deterministic=not train )
# 6. post-process
__A = self.conv_norm_out(_UpperCamelCase )
__A = nn.silu(_UpperCamelCase )
__A = self.conv_out(_UpperCamelCase )
__A = jnp.transpose(_UpperCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_UpperCamelCase )
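# A minimal sketch of driving the module above (FlaxUNet2DConditionModel in its
# original diffusers form). It assumes the original method/attribute names
# (init_weights, in_channels, sample_size, cross_attention_dim); shapes and the
# sequence length 77 are illustrative.
def demo_unet_forward(unet):
    import jax
    import jax.numpy as jnp

    rng = jax.random.PRNGKey(0)
    params = unet.init_weights(rng)  # the weight-init method defined above
    sample = jnp.zeros((1, unet.in_channels, unet.sample_size, unet.sample_size), dtype=jnp.float32)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, unet.cross_attention_dim), dtype=jnp.float32)
    return unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)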
| 117 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
| 356 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
snake_case__ : Optional[Any] = False
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ):
set_seed(0 )
lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 )
lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowerCAmelCase : str = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , )
lowerCAmelCase : int = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )]
lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )]
lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )]
# train with a DDPM scheduler
lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 )
model.train().to(UpperCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample
lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 )
model.train().to(UpperCamelCase_ )
for i in range(4 ):
optimizer.zero_grad()
lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample
lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
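# The equivalence checked above holds because both schedulers share the same
# forward process. A sketch of what add_noise computes (standard DDPM forward
# diffusion; the real method also handles device and dtype placement):
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
def sketch_add_noise(scheduler, clean_images, noise, timesteps):
    alpha_bar = scheduler.alphas_cumprod[timesteps].reshape(-1, 1, 1, 1)
    return alpha_bar.sqrt() * clean_images + (1 - alpha_bar).sqrt() * noise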
| 314 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a__ ( snake_case__ ) -> Dict:
lowerCamelCase , lowerCamelCase = image.size
lowerCamelCase , lowerCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowerCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
lowerCamelCase = np.array(snake_case__ ).astype(np.floataa ) / 255.0
lowerCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
lowerCamelCase = torch.from_numpy(snake_case__ )
return 2.0 * image - 1.0
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
"""simple docstring"""
if isinstance(_a , PIL.Image.Image ):
lowerCamelCase = 1
elif isinstance(_a , torch.Tensor ):
lowerCamelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}' )
if isinstance(_a , PIL.Image.Image ):
lowerCamelCase = preprocess(_a )
lowerCamelCase , lowerCamelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCamelCase = next(self.unet.parameters() ).dtype
lowerCamelCase = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
lowerCamelCase = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
lowerCamelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase = {}
if accepts_eta:
lowerCamelCase = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
lowerCamelCase = torch.cat([latents, image] , dim=1 )
lowerCamelCase = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
lowerCamelCase = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
lowerCamelCase = self.vqvae.decode(_a ).sample
lowerCamelCase = torch.clamp(_a , -1.0 , 1.0 )
lowerCamelCase = image / 2 + 0.5
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
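# A minimal usage sketch, assuming the pipeline above is LDMSuperResolutionPipeline
# (its original diffusers name); the checkpoint and file names are illustrative.
# The VQ-VAE decoder upscales a 128x128 input 4x.
def demo_super_resolution():
    import PIL.Image

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
    upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")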
| 291 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=snake_case__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=snake_case__ )
return parser.parse_args()
def a__ ( ) -> List[str]:
lowerCamelCase = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
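# Usage sketch (script name illustrative): the launched script must expose an
# `_mp_fn(index)` entry point, where `index` is the process ordinal that
# xmp.spawn passes to each TPU core, e.g.
#
#   # trainer_script.py
#   def _mp_fn(index):
#       print(f"hello from TPU core {index}")
#
# launched with:
#   python xla_spawn.py --num_cores 8 trainer_script.py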
| 291 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = PandasConfig
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowercase : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ ,(str, list, tuple) ):
_lowercase : List[Any] = data_files
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : str = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
_lowercase : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Tuple = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ ,gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowercase : List[str] = table_cast(UpperCAmelCase_ ,self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
for i, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
with open(UpperCAmelCase_ ,"""rb""" ) as f:
_lowercase : str = pa.Table.from_pandas(pd.read_pickle(UpperCAmelCase_ ) )
yield i, self._cast_table(UpperCAmelCase_ )
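# A minimal sketch of exercising this builder through load_dataset: the packaged
# "pandas" builder reads pickled DataFrames and yields one Arrow table per file.
# The pickle file name is hypothetical.
def demo_pandas_builder():
    import pandas as pd
    from datasets import load_dataset

    pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
    return load_dataset("pandas", data_files={"train": "train.pkl"})["train"]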
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations( s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations alphabetically
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string must be an int or castable to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
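    # A deterministic worked example of the same round trip (no input() needed):
    # the sorted rotations of "banana" have last column "nnbaaa" and place the
    # original string at index 3.
    demo = bwt_transform("banana")
    assert demo == {"bwt_string": "nnbaaa", "idx_original_string": 3}
    assert reverse_bwt(demo["bwt_string"], demo["idx_original_string"]) == "banana"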
| 336 | 0 |