import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add the "<extra_id_*>" sentinel tokens unless the caller already provided them.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after"
                " special tokens will not be properly handled. We recommend you to read the related pull request"
                " available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is re-loaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # In non-legacy mode, prefix the text with the SentencePiece underline so
        # tokens following special tokens are handled consistently.
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs) -> List[str]:
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            # Sentinel tokens occupy the top of the vocabulary, in reverse order.
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
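
# --- Usage sketch (added) ----------------------------------------------------
# Minimal illustration of the tokenizer above, assuming the public "t5-small"
# checkpoint on the Hugging Face Hub and an environment with `transformers`
# and `sentencepiece` installed. The helper name is hypothetical.
def _demo_t5_tokenizer():
    from transformers import T5Tokenizer  # upstream equivalent of the class above

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("Translate English to German: Hello!").input_ids
    # build_inputs_with_special_tokens appends </s> (the eos token) to every sequence.
    assert ids[-1] == tokenizer.eos_token_id
    print(tokenizer.convert_ids_to_tokens(ids))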
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """Parse a command-line string as a boolean flag."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
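
# --- Usage sketch (added) ----------------------------------------------------
# How strabool plugs into argparse: "--flag false" and "--flag 1" both parse
# cleanly. The flag name below is hypothetical.
def _demo_strabool():
    p = argparse.ArgumentParser()
    p.add_argument("--flag", type=strabool, default=True)
    assert p.parse_args(["--flag", "false"]).flag is False
    assert p.parse_args(["--flag", "1"]).flag is True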
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Target key names follow diffusers' ResnetBlock2D parameter layout.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    # The 1x1 conv weights become linear projections: drop the trailing spatial dims.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
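
# --- Usage sketch (added) ----------------------------------------------------
# Self-contained toy showing what convert_resnet does: it only renames keys,
# carrying values from the OpenAI layout over to the diffusers layout. The
# prefixes and values below are made up for illustration.
def _demo_convert_resnet():
    old_keys = [
        "in_layers.0.weight", "in_layers.0.bias", "in_layers.2.weight", "in_layers.2.bias",
        "emb_layers.1.weight", "emb_layers.1.bias", "out_layers.0.weight", "out_layers.0.bias",
        "out_layers.3.weight", "out_layers.3.bias",
    ]
    fake_ckpt = {f"input_blocks.1.0.{k}": i for i, k in enumerate(old_keys)}
    out = convert_resnet(fake_ckpt, {}, "input_blocks.1.0", "down_blocks.0.resnets.1")
    assert out["down_blocks.0.resnets.1.norm1.weight"] == fake_ckpt["input_blocks.1.0.in_layers.0.weight"]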
'''Manim scene illustrating accelerate's hook-based big-model inference (weights moving between CPU, GPU and disk).'''
from manim import *


# NOTE: the class name, base class, direction constants (UP/DOWN/LEFT/RIGHT) and
# the RED/ORANGE colors below were erased by obfuscation; they are reconstructed
# to match the upstream `accelerate` manim animation this scene mirrors.
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)
            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]))
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7)
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)
                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(FadeOut(step_2), FadeOut(a, run_time=0.5))

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
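
# --- Usage sketch (added) ----------------------------------------------------
# Rendering this scene requires the community edition of manim. Assuming the
# file is saved as stage_5.py, a typical invocation is:
#
#   manim -pql stage_5.py Stage5
#
# where -p previews the result when done and -ql selects fast low-quality
# rendering for iteration.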
'''Sum Euler's totient phi(n) over 2 <= n <= limit, using a prime sieve and the
product formula phi(n) = n * prod_{p | n} (1 - 1/p).'''


def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Apply the totient product formula, one pass per prime.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    """Render a nested benchmark-results JSON file as a collapsible Markdown table."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
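
# --- Usage sketch (added) ------------------------------------------------------
# Toy input demonstrating the expected JSON shape, i.e. benchmark -> metric ->
# {"new", "old", "diff"}. The file names below are hypothetical.
def _demo_format_json_to_md(tmp_json="bench.json", tmp_md="bench.md"):
    payload = {"benchmarks/matmul": {"latency_ms": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
    with open(tmp_json, "w", encoding="utf-8") as f:
        json.dump(payload, f)
    format_json_to_md(tmp_json, tmp_md)
    print(open(tmp_md, encoding="utf-8").read())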
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
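
# --- Pattern sketch (added) ------------------------------------------------
# The import-time guard above is a general pattern: fail fast with an
# actionable message when a hard dependency is too old. A minimal reusable
# form of the same idea (helper name is hypothetical):
def _require_min_version(module, minimum: str) -> None:
    from packaging import version as _version

    if _version.parse(module.__version__) < _version.parse(minimum):
        raise ImportWarning(f"{module.__name__}>={minimum} is required, found {module.__version__}.")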
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # The four boolean flags below were erased by obfuscation; their names are
    # reconstructed from the upstream DistilBERT test module.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
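
# --- Usage sketch (added) ------------------------------------------------------
# These tests follow the transformers test-suite layout. Inside a transformers
# checkout they would typically be run with pytest, for example:
#
#   pytest tests/models/distilbert/test_modeling_distilbert.py -k distilbert_model
#
# Tests decorated with @slow only run when RUN_SLOW=1 is set in the environment.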
"""A prefix-sum (cumulative-sum) helper supporting O(1) range-sum queries."""


class PrefixSum:
    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
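
# --- Usage sketch (added) ------------------------------------------------------
def _demo_prefix_sum():
    ps = PrefixSum([1, 2, 3, 4])         # prefix sums: [1, 3, 6, 10]
    assert ps.get_sum(0, 3) == 10        # whole array
    assert ps.get_sum(1, 2) == 5         # 2 + 3
    assert ps.contains_sum(7) is True    # the subarray [3, 4]
    assert ps.contains_sum(11) is False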
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase , _lowercase , _lowercase = 0 ):
'''simple docstring'''
__a , __a : List[str] = row, column
__a : List[Any] = [[default_value for c in range(__A )] for r in range(__A )]
def __str__(self ):
'''simple docstring'''
__a : Union[str, Any] = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__a : Optional[Any] = 0
for row_vector in self.array:
for obj in row_vector:
__a : Any = max(__A , len(str(__A ) ) )
__a : List[str] = F'''%{max_element_length}s'''
# Make string and return
def single_line(_lowercase ) -> str:
nonlocal string_format_identifier
__a : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__A ) for row_vector in self.array )
return s
def __repr__(self ):
'''simple docstring'''
return str(self )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__(self , _lowercase ):
'''simple docstring'''
assert self.validate_indicies(__A )
return self.array[loc[0]][loc[1]]
def __setitem__(self , _lowercase , _lowercase ):
'''simple docstring'''
assert self.validate_indicies(__A )
__a : List[Any] = value
def __add__(self , _lowercase ):
'''simple docstring'''
assert isinstance(__A , __A )
assert self.row == another.row and self.column == another.column
# Add
__a : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a : str = self[r, c] + another[r, c]
return result
def __neg__(self ):
'''simple docstring'''
__a : int = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a : int = -self[r, c]
return result
def __sub__(self , _lowercase ):
'''simple docstring'''
return self + (-another)
def __mul__(self , _lowercase ):
'''simple docstring'''
if isinstance(__A , (int, float) ): # Scalar multiplication
__a : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a : str = self[r, c] * another
return result
elif isinstance(__A , __A ): # Matrix multiplication
assert self.column == another.row
__a : List[str] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a : Any = F'''Unsupported type given for another ({type(__A )})'''
raise TypeError(__A )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a : List[Any] = self[r, c]
return result
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
assert isinstance(__A , __A ) and isinstance(__A , __A )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__a : List[Any] = v.transpose()
__a : Dict = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __magic_name__ ( ):
# a^(-1)
__a : List[str] = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a : Optional[int] = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__a : List[Any] = Matrix(3 , 1 , 0 )
__a , __a , __a : Optional[Any] = 1, 2, -3
__a : Dict = Matrix(3 , 1 , 0 )
__a , __a , __a : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCamelCase__ , UpperCamelCase__ )}''' )
def __magic_name__ ( ):
import doctest
doctest.testmod()
testa()
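
# --- Formula note (added) --------------------------------------------------------
# sherman_morrison computes (A + u v^T)^{-1} from ainv = A^{-1} via
#     (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u),
# which is exactly the expression returned above. Quick sanity check with
# A = A^{-1} = I, where (I + e1 e1^T)^{-1} should be diag(0.5, 1):
def _check_sherman_morrison():
    ainv = Matrix(2, 2, 0)
    ainv[0, 0] = ainv[1, 1] = 1
    u = Matrix(2, 1, 0)
    u[0, 0] = 1
    v = Matrix(2, 1, 0)
    v[0, 0] = 1
    m = ainv.sherman_morrison(u, v)
    assert m[0, 0] == 0.5 and m[1, 1] == 1 and m[0, 1] == 0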
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Scale manually when gradient accumulation is handled outside the wrapper.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0+, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
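# A minimal end-to-end sketch of the depth-estimation path exercised above
# (the checkpoint name comes from the integration test; loading it downloads
# weights, so this is left as an uncalled, illustrative helper):
def _example_dpt_depth_estimation(image):
    processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
    model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        # predicted_depth has shape (batch_size, height, width)
        return model(**inputs).predicted_depth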
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """
    Utility class containing a conversation and its history.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
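# Illustrative walkthrough (not part of the original module): the state
# transitions the pipeline performs on each turn of a `Conversation`.
def _example_conversation_state():
    conversation = Conversation("Hi there!")
    conversation.mark_processed()  # pending user input moves into the history
    conversation.append_response("Hello! How can I help?")
    conversation.add_user_input("Tell me a joke.")
    # Yields (is_user, text) pairs in chronological order:
    # [(True, "Hi there!"), (False, "Hello! How can I help?"), (True, "Tell me a joke.")]
    return list(conversation.iter_texts())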
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
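# Illustrative end-to-end usage (not part of the original module; the
# checkpoint name is an example, and loading it downloads weights, so this is
# left as an uncalled helper):
def _example_conversational_pipeline():
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conversation = Conversation("What's the best way to learn Python?")
    conversation = chatbot(conversation)
    return conversation.generated_responses[-1]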
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
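# Example invocation (illustrative -- the script filename and checkpoint path
# are placeholders, and `--push_to_hub` requires write access to the Hub):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti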
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class _a ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase = " " ):
"""simple docstring"""
a__ : List[Any] = sentence_delimiter
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return list(__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : str = []
for sent_idx, sentence in enumerate(__UpperCAmelCase ):
chars.extend(self.process_string(__UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCamelCase = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
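# Worked example of the formula in the docstring (illustrative, computed by
# hand): reference "kitten" vs. prediction "sitting" aligns with S=2
# ("k"->"s", "e"->"i"), D=0, I=1 (the trailing "g"), and N=6 reference
# characters, so CER = (S + D + I) / N = (2 + 0 + 1) / 6 = 0.5.
def _example_cer_by_hand():
    S, D, I, N = 2, 0, 1, 6
    return (S + D + I) / N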
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , lowerCamelCase__ , )
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =RobertaConfig
snake_case_ ="""roberta"""
def __init__(self ,__lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(__lowerCamelCase )
lowerCAmelCase__ : Any = config.num_labels
lowerCAmelCase__ : List[Any] = config.num_hidden_layers
lowerCAmelCase__ : int = DeeRobertaModel(__lowerCamelCase )
lowerCAmelCase__ : Any = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : Dict = nn.Linear(config.hidden_size ,self.config.num_labels )
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=-1 ,__lowerCamelCase=False ,) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.num_layers
try:
lowerCAmelCase__ : Union[str, Any] = self.roberta(
__lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,position_ids=__lowerCamelCase ,head_mask=__lowerCamelCase ,inputs_embeds=__lowerCamelCase ,)
lowerCAmelCase__ : Optional[int] = outputs[1]
lowerCAmelCase__ : str = self.dropout(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.classifier(__lowerCamelCase )
lowerCAmelCase__ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase__ : Union[str, Any] = e.message
lowerCAmelCase__ : Tuple = e.exit_layer
lowerCAmelCase__ : str = outputs[0]
if not self.training:
lowerCAmelCase__ : Optional[int] = entropy(__lowerCamelCase )
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : Dict = MSELoss()
lowerCAmelCase__ : List[Any] = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : Any = CrossEntropyLoss()
lowerCAmelCase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
lowerCAmelCase__ : Dict = []
for highway_exit in outputs[-1]:
lowerCAmelCase__ : Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : int = MSELoss()
lowerCAmelCase__ : int = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase__ : Dict = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(__lowerCamelCase )
if train_highway:
lowerCAmelCase__ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase__ : Tuple = (loss,) + outputs
if not self.training:
lowerCAmelCase__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase__ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
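

# Why early exit helps: at inference time DeeBERT-style models stop at the first
# intermediate ("highway") classifier whose prediction entropy falls below a
# threshold. A minimal sketch of that confidence measure (an illustration under
# that assumption, not the imported `entropy` helper from modeling_highway_bert):
def _entropy_sketch(logits):
    # Shannon entropy of the softmax distribution, per example; low entropy
    # means the intermediate classifier is already confident.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)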
| 647 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
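

# Usage sketch (repo id and file name below are made-up examples): the helper
# builds a resolvable URL for a file inside a dataset repo on the Hub. The
# version gate exists because huggingface_hub >= 0.11.0 url-encodes the path
# itself, so quoting twice would corrupt names containing spaces or '?'.
#
#   url = hf_hub_url("user/my-dataset", "data/train file.csv", revision="main")
#   # -> https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train%20file.csv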
| 720 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 616 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00_085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
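

# A note on `strength` in img2img (a sketch of the usual diffusers convention,
# not a verified statement about this pipeline's internals): the init image is
# noised to an intermediate timestep and only the tail of the schedule is run,
# roughly int(num_inference_steps * strength) denoising steps. So the call above
# with num_inference_steps=100 and strength=0.2 performs on the order of 20
# steps, which keeps the output close to the input cat image.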
| 61 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
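

# For intuition, binary MCC can be computed straight from the confusion matrix;
# this standalone sketch (not part of the metric class above) matches sklearn
# for binary labels:
#
#   from math import sqrt
#
#   def mcc(tp: int, tn: int, fp: int, fn: int) -> float:
#       denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
#       return (tp * tn - fp * fn) / denom if denom else 0.0
#
#   # e.g. tp=3, tn=2, fp=1, fn=1 -> (6 - 1) / sqrt(4 * 4 * 3 * 3) = 5 / 12 ≈ 0.417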
| 572 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
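

# The WordpieceTokenizer exercised above performs greedy longest-match-first
# subword splitting. A self-contained sketch of that algorithm (an illustrative
# reimplementation, not the transformers internals):
def wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the '##' prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1  # shrink the candidate until it appears in the vocab
        if cur is None:
            return [unk_token]  # no prefix matched: the whole word is unknown
        tokens.append(cur)
        start = end
    return tokens


# wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]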
| 712 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False,
        prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 400 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
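

# The masked-LM check above reads the logits at the [MASK] position and takes
# the argmax over the vocabulary. A generic sketch of that decode step which
# locates the mask instead of hard-coding index 2 (illustrative, not part of
# the test suite):
#
#   mask_index = (encoding.input_ids[0] == tokenizer.mask_token_id).nonzero().item()
#   predicted_id = token_logits[0, mask_index].argmax(-1)
#   predicted_word = tokenizer.decode(predicted_id)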
| 579 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
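
# A minimal sketch of the deferred-import idea behind _LazyModule (assumption:
# this mirrors the behaviour, not the actual transformers implementation). The
# module in sys.modules is swapped for an object that only imports the
# tokenization submodule when GPTSw3Tokenizer is first accessed:
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)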
| 579 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
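

# The brevity penalty reported above keeps BLEU from rewarding translations
# that are precise only because they are short: BP = 1 if c > r else
# exp(1 - r / c), for candidate length c and reference length r. A standalone
# illustration (not the nmt_bleu internals):
#
#   from math import exp
#
#   def brevity_penalty(candidate_len: int, reference_len: int) -> float:
#       if candidate_len > reference_len:
#           return 1.0
#       return exp(1 - reference_len / candidate_len)
#
#   # brevity_penalty(8, 10) == exp(1 - 10 / 8) ≈ 0.7788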
| 563 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
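
    # Worked example for a single point (illustration only): if µ_young(x) = 0.6
    # and µ_middle_aged(x) = 0.4 at some x, then
    #   union             = max(0.6, 0.4)          = 0.6
    #   intersection      = min(0.6, 0.4)          = 0.4
    #   algebraic sum     = 0.6 + 0.4 - 0.6 * 0.4  = 0.76
    #   algebraic product = 0.6 * 0.4              = 0.24
    #   bounded sum       = min(1, 0.6 + 0.4)      = 1.0
    #   bounded diff      = max(0, 0.6 - 0.4)      = 0.2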
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 563 | 1 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c
def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
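

# Quick numeric check (illustration): at half the speed of light, beta = 0.5
# and gamma = 1 / sqrt(1 - 0.25) ≈ 1.1547, i.e. moving clocks tick about 15%
# slower as seen from the rest frame.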
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event=None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 531 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=10_00, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2',
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.0_0085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turtle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (7_68, 7_68, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 531 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCAmelCase : Any = object()
# For specifying empty leaf dict `{}`
UpperCAmelCase : Any = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Loop over windows of ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
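

# Usage sketch (the parameter tree below is a made-up miniature, not a real
# checkpoint): set_partitions maps every flattened parameter path to a
# PartitionSpec, sharding matmul kernels across the "mp" mesh axis and leaving
# biases and layer norms replicated (None).
#
#   params = {
#       "transformer": {"wte": {"embedding": ...}},
#       "attention": {"out_proj": {"kernel": ..., "bias": ...}},
#   }
#   specs = set_partitions(params)
#   # specs["transformer"]["wte"]["embedding"] == P("mp", None)
#   # specs["attention"]["out_proj"]["bias"] is None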
| 100 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts: the first stores the
    frequency of single-character strings, the second the frequency of
    two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
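

# Sanity check for the entropy formula H = -sum(p * log2(p)) (standalone
# illustration): a uniform distribution over n symbols has entropy log2(n).
def _entropy_sketch(probs: list[float]) -> float:
    return -sum(p * math.log2(p) for p in probs if p > 0)


# _entropy_sketch([0.25, 0.25, 0.25, 0.25]) == 2.0  (log2 of 4)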
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
    main()
| 100 | 1 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_UpperCamelCase : int = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str , __a : Union[str, Any] ) -> int:
_UpperCamelCase : Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_UpperCamelCase : str = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __a ) ) for url in data_url )
_UpperCamelCase : List[str] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_UpperCamelCase : Tuple = [data_url[0]] * len(__a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase : Any = os.path.join(__a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__a )
return dummy_data_list
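    # Shard-detection sketch: URLs such as "data.txt-000001-of-00300" match
    # r"[0-9]{3,}-of-[0-9]{3,}", so the dummy list repeats the first shard's
    # URL for every entry instead of materializing each shard separately.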
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Union[str, Any] , __a : int ) -> List[str]:
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_UpperCamelCase : Optional[Any] = os.path.join(__a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Dict ) -> Any:
def _iter_archive_members(__a : List[str] ):
# this preserves the order of the members inside the ZIP archive
_UpperCamelCase : List[Any] = Path(self.dummy_file ).parent
_UpperCamelCase : List[str] = path.relative_to(__a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_UpperCamelCase : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__a )
_UpperCamelCase : Any = Path(__a )
_UpperCamelCase : Union[str, Any] = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__a ).as_posix(), file_path.open("rb" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Any:
if not isinstance(__a , __a ):
_UpperCamelCase : List[str] = [paths]
for path in paths:
if os.path.isfile(__a ):
if os.path.basename(__a ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__a ):
if os.path.basename(__a ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__a ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__a , __a )
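# Filename-derivation sketch: each download URL is mapped to a dummy-data file
# name by quote_plus-encoding its last path component, e.g.
#   urllib.parse.quote_plus(Path("https://host/train.json?rev=2").name)
#   -> "train.json%3Frev%3D2"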
| 624 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE__ :Dict = "AutoImageProcessor"
SCREAMING_SNAKE_CASE__ :Optional[int] = "AutoTokenizer"
def __init__( self : Dict , __a : Tuple , __a : str ) -> Optional[Any]:
super().__init__(__a , __a )
_UpperCamelCase : Tuple = self.image_processor
def __call__( self : Tuple , __a : Tuple=None , __a : str=None , __a : Any=None , **__a : Optional[Any] ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
_UpperCamelCase : str = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_UpperCamelCase : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , *__a : Optional[int] , **__a : Optional[int] ) -> List[Any]:
return self.tokenizer.batch_decode(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : Any , **__a : List[Any] ) -> Tuple:
return self.tokenizer.decode(*__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return ["input_ids", "attention_mask", "pixel_values"]
| 624 | 1 |
import re
def indian_phone_validator(phone: str ) -> bool:
    pat = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
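    # Illustrative spot-checks (a sketch): the pattern is anchored with ^...$,
    # so re.search behaves like a full-string match here.
    for number in ("+91-8827897895", "9876543210", "1234567890"):
        print(number, indian_phone_validator(number))  # True, True, False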
| 709 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase__ = None
lowerCAmelCase__ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase__ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "PIL.Image.Image"
__SCREAMING_SNAKE_CASE = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__SCREAMING_SNAKE_CASE = field(default='Image' , init=lowerCamelCase_ , repr=lowerCamelCase_ )
def __call__( self ):
return self.pa_type
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =np.array(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowerCAmelCase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCAmelCase_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
_lowercase ={}
_lowercase , _lowercase =value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowerCAmelCase_ ):
_lowercase =PIL.Image.open(lowerCAmelCase_ )
else:
_lowercase =path.split("::" )[-1]
try:
_lowercase =string_to_dict(lowerCAmelCase_ , config.HUB_DATASETS_URL )["repo_id"]
_lowercase =token_per_repo_id.get(lowerCAmelCase_ )
except ValueError:
_lowercase =None
with xopen(lowerCAmelCase_ , "rb" , use_auth_token=lowerCAmelCase_ ) as f:
_lowercase =BytesIO(f.read() )
_lowercase =PIL.Image.open(bytes_ )
else:
_lowercase =PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowerCAmelCase ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
if pa.types.is_string(storage.type ):
_lowercase =pa.array([None] * len(lowerCAmelCase_ ) , type=pa.binary() )
_lowercase =pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowercase =pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
_lowercase =pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowercase =storage.field("bytes" )
else:
_lowercase =pa.array([None] * len(lowerCAmelCase_ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowercase =storage.field("path" )
else:
_lowercase =pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
_lowercase =pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_lowercase =pa.array(
[encode_np_array(np.array(lowerCAmelCase_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_lowercase =pa.array([None] * len(lowerCAmelCase_ ) , type=pa.string() )
_lowercase =pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase_ , self.pa_type )
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase_ ):
with xopen(lowerCAmelCase_ , "rb" ) as f:
_lowercase =f.read()
return bytes_
_lowercase =pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_lowercase =pa.array(
[os.path.basename(lowerCAmelCase_ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
_lowercase =pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase_ , self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_lowercase =list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( __a : "PIL.Image.Image" ) -> bytes:
_lowercase =BytesIO()
if image.format in list_image_compression_formats():
_lowercase =image.format
else:
_lowercase ="PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__a , format=__a )
return buffer.getvalue()
def __lowerCamelCase ( __a : "PIL.Image.Image" ) -> dict:
if hasattr(__a , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__a )}
def __lowerCamelCase ( __a : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
_lowercase =array.dtype
_lowercase =dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
_lowercase =dtype.kind
_lowercase =dtype.itemsize
_lowercase =None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_lowercase =np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_lowercase =dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_lowercase =dtype_byteorder + dtype_kind + str(__a )
_lowercase =np.dtype(__a )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
_lowercase =PIL.Image.fromarray(array.astype(__a ) )
return {"path": None, "bytes": image_to_bytes(__a )}
def __lowerCamelCase ( __a : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
_lowercase , _lowercase =first_non_null_value(__a )
if isinstance(__a , __a ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__a , np.ndarray ):
_lowercase =no_op_if_value_is_null(__a )
return [obj_to_image_dict_func(__a ) for obj in objs]
elif isinstance(__a , PIL.Image.Image ):
_lowercase =no_op_if_value_is_null(__a )
return [obj_to_image_dict_func(__a ) for obj in objs]
else:
return objs
else:
return objs
| 594 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase = logging.getLogger(__name__)
def main():
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=lowerCAmelCase__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=lowerCAmelCase__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=lowerCAmelCase__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=lowerCAmelCase__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(f"""{len(lowerCAmelCase__ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = f"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
rslt.append(lowerCAmelCase__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(f"""{len(lowerCAmelCase__ )} examples processed.""" )
UpperCAmelCase_ = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"""Dump to {dp_file}""" )
with open(lowerCAmelCase__ , "wb" ) as handle:
pickle.dump(rslt_ , lowerCAmelCase__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
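    # Minimal sketch of the dtype choice above: token ids fit in uint16 only
    # when the vocabulary has fewer than 2**16 entries, otherwise int32 is
    # used (30522 below is bert-base-uncased's vocab size, purely an example).
    example_vocab_size = 30522
    example_dtype = np.uint16 if example_vocab_size < (1 << 16) else np.int32
    print(np.array([101, 7592, 2088, 102], dtype=example_dtype).dtype)  # uint16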
| 82 |
'''simple docstring'''
def search(list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    """simple docstring"""
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
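    # Hedged usage examples for the two-pointer recursive search above:
    print(search([1, 3, 5, 7], 5))  # 2
    print(search([1, 3, 5, 7], 4))  # -1 (absent key)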
| 292 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Optional[Any] = """xlnet"""
__a : Union[str, Any] = ["""mems"""]
__a : int = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self, snake_case__=3_20_00, snake_case__=10_24, snake_case__=24, snake_case__=16, snake_case__=40_96, snake_case__="gelu", snake_case__=True, snake_case__="bi", snake_case__=0.02, snake_case__=1E-12, snake_case__=0.1, snake_case__=5_12, snake_case__=None, snake_case__=True, snake_case__=False, snake_case__=False, snake_case__=-1, snake_case__=False, snake_case__="last", snake_case__=True, snake_case__="tanh", snake_case__=0.1, snake_case__=5, snake_case__=5, snake_case__=5, snake_case__=1, snake_case__=2, **snake_case__, ) -> Dict:
"""simple docstring"""
lowercase_ : Tuple = vocab_size
lowercase_ : List[Any] = d_model
lowercase_ : Any = n_layer
lowercase_ : Tuple = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
lowercase_ : List[str] = d_model // n_head
lowercase_ : List[str] = ff_activation
lowercase_ : str = d_inner
lowercase_ : Optional[Any] = untie_r
lowercase_ : Optional[Any] = attn_type
lowercase_ : str = initializer_range
lowercase_ : Any = layer_norm_eps
lowercase_ : int = dropout
lowercase_ : Union[str, Any] = mem_len
lowercase_ : List[str] = reuse_len
lowercase_ : Dict = bi_data
lowercase_ : Any = clamp_len
lowercase_ : Tuple = same_length
lowercase_ : Dict = summary_type
lowercase_ : str = summary_use_proj
lowercase_ : Union[str, Any] = summary_activation
lowercase_ : Optional[Any] = summary_last_dropout
lowercase_ : str = start_n_top
lowercase_ : Dict = end_n_top
lowercase_ : Any = bos_token_id
lowercase_ : Optional[int] = pad_token_id
lowercase_ : List[Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""", snake_case__, )
lowercase_ : Optional[Any] = kwargs["""use_cache"""]
lowercase_ : int = use_mems_eval
lowercase_ : Union[str, Any] = use_mems_train
super().__init__(pad_token_id=snake_case__, bos_token_id=snake_case__, eos_token_id=snake_case__, **snake_case__ )
@property
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def snake_case__ ( self, snake_case__ ) -> List[str]:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 703 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Tuple = """t5"""
__a : Optional[Any] = ["""past_key_values"""]
__a : Optional[int] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self, snake_case__=3_21_28, snake_case__=5_12, snake_case__=64, snake_case__=20_48, snake_case__=6, snake_case__=None, snake_case__=8, snake_case__=32, snake_case__=1_28, snake_case__=0.1, snake_case__=1E-6, snake_case__=1.0, snake_case__="relu", snake_case__=True, snake_case__=True, snake_case__=0, snake_case__=1, **snake_case__, ) -> List[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = vocab_size
lowercase_ : int = d_model
lowercase_ : int = d_kv
lowercase_ : Optional[Any] = d_ff
lowercase_ : Union[str, Any] = num_layers
lowercase_ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase_ : str = num_heads
lowercase_ : int = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Any = dropout_rate
lowercase_ : Optional[Any] = layer_norm_epsilon
lowercase_ : int = initializer_factor
lowercase_ : Union[str, Any] = feed_forward_proj
lowercase_ : Any = use_cache
lowercase_ : Optional[int] = self.feed_forward_proj.split("""-""" )
lowercase_ : Optional[Any] = act_info[-1]
lowercase_ : Union[str, Any] = act_info[0] == """gated"""
if len(snake_case__ ) > 1 and act_info[0] != "gated" or len(snake_case__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase_ : Optional[int] = """gelu_new"""
super().__init__(
pad_token_id=snake_case__, eos_token_id=snake_case__, is_encoder_decoder=snake_case__, **snake_case__, )
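    # Parsing sketch for `feed_forward_proj` (illustrative values):
    #   "relu"       -> dense_act_fn "relu", gated False
    #   "gated-gelu" -> dense_act_fn "gelu", gated True, then remapped to
    #                   "gelu_new" by the backward-compatibility branch above.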
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
@property
def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase_ : str = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
lowercase_ : Any = """past_encoder_sequence + sequence"""
lowercase_ : Tuple = {0: """batch"""}
lowercase_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase_ : int = {0: """batch""", 1: """decoder_sequence"""}
lowercase_ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case__, direction="""inputs""" )
return common_inputs
@property
def snake_case__ ( self ) -> int:
"""simple docstring"""
        return 13
| 436 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase_ : Tuple = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] )-> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
__snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
__snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
elif weight_type == "running_mean":
__snake_case = value
elif weight_type == "running_var":
__snake_case = value
elif weight_type == "num_batches_tracked":
__snake_case = value
elif weight_type == "inv_freq":
__snake_case = value
else:
__snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
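# Traversal sketch: a dotted HF key such as "encoder.layers.0.self_attn.linear_q.weight"
# is resolved one attribute at a time, i.e.
#   pointer = hf_model
#   for attr in key.split("."):
#       pointer = getattr(pointer, attr)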
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> int:
'''simple docstring'''
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case = True
else:
for key, mapped_key in MAPPING.items():
__snake_case = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2]
__snake_case = mapped_key.replace('''*''' , _lowerCamelCase )
if "pos_bias_u" in name:
__snake_case = None
elif "pos_bias_v" in name:
__snake_case = None
elif "weight_g" in name:
__snake_case = '''weight_g'''
elif "weight_v" in name:
__snake_case = '''weight_v'''
elif "bias" in name:
__snake_case = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = '''weight'''
elif "running_mean" in name:
__snake_case = '''running_mean'''
elif "inv_freq" in name:
__snake_case = '''inv_freq'''
elif "running_var" in name:
__snake_case = '''running_var'''
elif "num_batches_tracked" in name:
__snake_case = '''num_batches_tracked'''
else:
__snake_case = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] )-> Optional[Any]:
'''simple docstring'''
__snake_case = full_name.split('''conv_layers.''' )[-1]
__snake_case = name.split('''.''' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : str=True )-> Optional[Any]:
'''simple docstring'''
if config_path is not None:
__snake_case = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act='''swish''' )
else:
__snake_case = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__snake_case = '''rotary'''
if is_finetuned:
if dict_path:
__snake_case = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
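            # Sketch of the effect: fairseq dictionaries start as
            # {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, ...}; after the two
            # assignments above, <pad> maps to 0 and <s> to 1, matching the CTC
            # tokenizer's expectation that the pad/blank token has index 0.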
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
__snake_case = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowerCamelCase , )
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
__snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case = WavaVecaConformerForCTC(_lowerCamelCase )
else:
__snake_case = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case = argparse.Namespace(task='''audio_pretraining''' )
__snake_case = fairseq.tasks.setup_task(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
__snake_case = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 24 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'AutoImageProcessor'
lowercase_ = 'AutoTokenizer'
def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]:
"""simple docstring"""
super().__init__(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = self.image_processor
def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple:
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase ( _A ):
def __init__( self , a_ , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , **a_ , ):
super().__init__(
features=a_ , cache_dir=a_ , keep_in_memory=a_ , streaming=a_ , num_proc=a_ , **a_ , )
lowerCAmelCase : int = Generator(
cache_dir=a_ , features=a_ , generator=a_ , gen_kwargs=a_ , **a_ , )
def _lowerCamelCase ( self ):
# Build iterable dataset
if self.streaming:
lowerCAmelCase : Optional[Any] = self.builder.as_streaming_dataset(split="train" )
# Build regular (map-style) dataset
else:
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : List[str] = None
lowerCAmelCase : List[Any] = None
lowerCAmelCase : str = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , num_proc=self.num_proc , )
lowerCAmelCase : int = self.builder.as_dataset(
split="train" , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
| 551 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
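    # Quick numeric check (illustrative): sigmoid maps the reals into (0, 1)
    # and sigmoid(0) == 0.5 exactly.
    print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # [0.11920292 0.5 0.88079708]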
| 551 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
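# Worked example (illustrative numbers): for a 480x640 input resized toward
# 384x384 with keep_aspect_ratio=True and multiple=32, scale_height = 0.8 and
# scale_width = 0.6; the height scale deviates less from 1.0 (0.2 vs 0.4), so
# "fit height" wins and the output is (384, round(0.8 * 640 / 32) * 32) = (384, 512).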
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
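# Illustrative calls (either prime factor may be returned, depending on the
# pseudorandom walk):
#   pollard_rho(8051) -> 83 or 97, since 8051 = 83 * 97
#   pollard_rho(13)   -> None (13 is prime, so no nontrivial divisor exists)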
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 283 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCAmelCase : Dict = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
lowercase__ = "ernie_m"
lowercase__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : str , lowerCAmelCase_ : int = 2_5_0_0_0_2 , lowerCAmelCase_ : int = 7_6_8 , lowerCAmelCase_ : int = 1_2 , lowerCAmelCase_ : int = 1_2 , lowerCAmelCase_ : int = 3_0_7_2 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 5_1_4 , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 1E-05 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Dict=0.0 , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=__A , **__A)
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = classifier_dropout
lowercase_ = is_decoder
lowercase_ = act_dropout
| 715 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
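

def _text_decoder_smoke_test():
    # Smoke-test sketch for the decoder above. The tiny hyperparameters are
    # illustrative only and do not match any released UniDiffuser checkpoint.
    decoder = UniDiffuserTextDecoder(
        prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=32, n_embd=32, n_layer=2, n_head=2
    )
    features = torch.randn(2, 1, 32)  # a batch of two fake CLIP-style features
    tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")
    assert tokens.shape[0] == 2 and lengths.shape[0] == 2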
| 100 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 523 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 523 | 1 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 607 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Heuristic for the search: Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor positions in the grid."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        """Retrace the path from `node` back through its parents."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
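    # Sanity check (a sketch): any path returned by greedy best-first starts at
    # `init` and ends at `goal`; unlike A*, it does not guarantee a shortest path.
    if path:
        assert path[0] == init and path[-1] == goal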
| 607 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
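

def _pad_semantics_sketch():
    # Reference sketch of the semantics exercised above, for a single process:
    # `pad_across_processes` zero-pads each rank's tensor up to the longest first
    # dimension across ranks, on the right by default or on the left with `pad_first=True`.
    short = torch.tensor([1, 2])
    right_padded = torch.nn.functional.pad(short, (0, 1))  # -> [1, 2, 0]
    left_padded = torch.nn.functional.pad(short, (1, 0))  # -> [0, 1, 2]
    assert right_padded.tolist() == [1, 2, 0] and left_padded.tolist() == [0, 1, 2]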
| 624 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
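
# Shape check (a sketch): each of the `topk` entries returned by `fill_mask`
# is a (filled sentence, score, predicted token) triple.
demo_outputs = fill_mask(masked_input, model, tokenizer, topk=3)
assert len(demo_outputs) == 3 and all(len(entry) == 3 for entry in demo_outputs)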
| 624 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
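

if __name__ == "__main__":
    # Usage sketch (checkpoint and placeholder names are illustrative): register a
    # multi-vector placeholder, then tokenize a prompt that uses it.
    tok = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tok.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    input_ids = tok("a photo of <cat-toy>")["input_ids"]
    print(len(input_ids))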
| 652 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
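

if __name__ == "__main__":
    # Behaviour sketch: the shim still constructs a working image processor but
    # emits a FutureWarning that callers can assert on.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        SegformerFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)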
| 652 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
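

def _ids_tensor_sketch(shape, vocab_size):
    # Minimal stand-in (a sketch) for the `ids_tensor` helper used above: uniform
    # random token ids in [0, vocab_size), which is all these tests rely on.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)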
| 148 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
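

if __name__ == "__main__":
    # Worked example (a sketch): with the defaults above, a 224x224 image splits into
    # (224 // 16) ** 2 = 196 patches, of which mask_ratio=0.75 hides ~147 in pretraining.
    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    assert num_patches == 196
    print(f"{int(num_patches * config.mask_ratio)} of {num_patches} patches masked")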
| 469 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
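

# Local invocation sketch (the paths are illustrative and assume a source checkout
# of accelerate with its `examples/` folder present):
#
#   python -m pytest tests/test_examples.py -k FeatureExamplesTests -s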
| 56 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(artifact_path: str):
    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
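

def _handle_test_results_selfcheck():
    # Self-check sketch for the summary parser above, fed a typical pytest footer:
    # "failed"/"passed" counts come from the preceding tokens, the time from the tail.
    failed, success, time_spent = handle_test_results("== 2 failed, 30 passed in 1.23s ==")
    assert (failed, success, time_spent) == (2, 30, "1.23s")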
| 56 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
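

if __name__ == "__main__":
    # Sketch: the ONNX export config above declares `pixel_values` as the sole
    # input and a 1e-4 absolute tolerance for output validation.
    onnx_config = BeitOnnxConfig(BeitConfig())
    assert list(onnx_config.inputs.keys()) == ["pixel_values"]
    assert onnx_config.atol_for_validation == 1e-4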
| 676 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Naive evaluation: sum c_i * x**i over the coefficients."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Horner's scheme: evaluate the same polynomial with one multiply per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
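    # Cross-check (a sketch): Horner's scheme and the naive evaluation must agree
    # up to floating-point noise; here both give 79800.0.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9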
| 676 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
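

if __name__ == "__main__":
    # Usage sketch (the backbone name is illustrative): the config simply records
    # which timm model to wrap and which feature maps to expose.
    config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
    assert config.use_timm_backbone and config.out_indices == (1, 2, 3, 4)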
| 20 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase : Dict = "pt"
elif is_tf_available():
_UpperCAmelCase : Dict = "tf"
else:
_UpperCAmelCase : List[Any] = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.

        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.ta_base_tokenizer
lowerCAmelCase__ = ['''A long paragraph for summarization. </s>''']
lowerCAmelCase__ = ['''Summary of the text. </s>''']
# fmt: off
lowerCAmelCase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCAmelCase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCAmelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , text_target=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch['''input_ids'''][0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch['''labels'''][0] )
def __snake_case ( self : str ):
# safety check on max_len default value so we are sure the test works
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase__ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [f'<extra_id_{i}>' for i in range(125 )]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=SCREAMING_SNAKE_CASE_ )]
lowerCAmelCase__ = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def __snake_case ( self : Optional[int] ):
pass
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Any ):
pass
def __snake_case ( self : int ):
pass
def __snake_case ( self : Dict ):
        # The default common tokenizer tests use invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCAmelCase__ = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase__ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowerCAmelCase__ = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any ):
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase__ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE_ , attr + '''_id''' , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + '''_id''' ) , SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , attr + '''_id''' , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + '''_id''' ) , SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens_ids''' ) , [] )
setattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
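# Context for the hard-coded id lists in the tests above: ByT5 tokenizes raw
# UTF-8 bytes, shifted by the three leading special tokens (pad=0, eos=1,
# unk=2), so 'U' (byte 85) becomes 88 and '€' (UTF-8 bytes 226, 130, 172)
# becomes [229, 133, 175]. A minimal sketch of that convention, assuming the
# standard ByT5 scheme; this is not the tokenizer implementation itself:
def byte_ids(text, eos_id=1, offset=3):
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

assert byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]

# The empty-decode check above also follows from this scheme: id 255 maps back
# to byte 252 (0xFC), which is not valid UTF-8 on its own, so decoding drops it
# (errors="ignore" is an assumption about the tokenizer's behaviour):
assert bytes([255 - 3]).decode("utf-8", errors="ignore") == ""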
| 668 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
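# The _LazyModule above defers the heavy framework imports until an attribute
# is first accessed. A stripped-down sketch of the same idea using a PEP 562
# module-level __getattr__ (illustrative only; the real _LazyModule also
# proxies submodules and produces richer error messages):
import importlib

def __getattr__(name):
    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")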
| 668 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class a :
def __init__( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any=None , __lowerCAmelCase : int=None ):
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = val
_UpperCAmelCase = (start + end) // 2
_UpperCAmelCase = left
_UpperCAmelCase = right
def __repr__( self : int ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class a :
def __init__( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = collection
_UpperCAmelCase = function
if self.collection:
_UpperCAmelCase = self._build_tree(0 , len(lowercase__ ) - 1 )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
self._update_tree(self.root , lowercase__ , lowercase__ )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ):
return self._query_range(self.root , lowercase__ , lowercase__ )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : str ):
if start == end:
return SegmentTreeNode(lowercase__ , lowercase__ , self.collection[start] )
_UpperCAmelCase = (start + end) // 2
_UpperCAmelCase = self._build_tree(lowercase__ , lowercase__ )
_UpperCAmelCase = self._build_tree(mid + 1 , lowercase__ )
return SegmentTreeNode(lowercase__ , lowercase__ , self.fn(left.val , right.val ) , lowercase__ , lowercase__ )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
if node.start == i and node.end == i:
_UpperCAmelCase = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase__ , lowercase__ )
else:
self._update_tree(node.right , lowercase__ , lowercase__ )
_UpperCAmelCase = self.fn(node.left.val , node.right.val )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase__ , lowercase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase__ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase__ , lowercase__ )
def lowerCAmelCase_ ( self : Any ):
if self.root is not None:
_UpperCAmelCase = Queue()
queue.put(self.root )
while not queue.empty():
_UpperCAmelCase = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 5_0)
UpperCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
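# Sanity check of the demo output above (a small sketch, not part of the
# original module): after update(1, 5) the collection is [2, 5, 5, 3, 4], so
# with fn=operator.add, query_range(1, 3) sums 5 + 5 + 3 = 13. Both update and
# query run in O(log n), since each call follows at most two root-to-leaf paths.
import operator

st = SegmentTree([2, 1, 5, 3, 4], operator.add)
st.update(1, 5)
assert st.query_range(1, 3) == 13
assert st.query_range(3, 4) == 7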
| 703 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = StableUnCLIPPipeline
_snake_case : List[str] = TEXT_TO_IMAGE_PARAMS
_snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_snake_case : str = False
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = 32
_UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=__lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=__lowerCAmelCase )
_UpperCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowerCAmelCase , layers_per_block=1 , upcast_attention=__lowerCAmelCase , use_linear_projection=__lowerCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = pipe("""anime turle""" , generator=__lowerCAmelCase , output_type="""np""" )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
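# The offloading test above relies on CUDA's peak-memory counters to bound
# VRAM usage. The reusable measurement pattern, as a hedged sketch (assumes a
# CUDA device is available):
import torch

def peak_memory_bytes(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()  # run the workload being measured
    return torch.cuda.max_memory_allocated()  # high-water mark in bytes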
| 275 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
for attribute in key.split(""".""" ):
A_ : str = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
A_ : List[Any] = getattr(snake_case__ , snake_case__ ).shape
else:
A_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
A_ : Optional[Any] = value
elif weight_type == "weight_g":
A_ : List[Any] = value
elif weight_type == "weight_v":
A_ : Tuple = value
elif weight_type == "bias":
A_ : int = value
else:
A_ : Union[str, Any] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : List[Any] = []
A_ : Any = fairseq_model.state_dict()
A_ : List[Any] = hf_model.feature_extractor
A_ : Optional[Any] = hf_model.adapter
for name, value in fairseq_dict.items():
A_ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , )
A_ : int = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A_ : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A_ : List[Any] = True
if "*" in mapped_key:
A_ : Dict = name.split(snake_case__ )[0].split(""".""" )[-2]
A_ : List[Any] = mapped_key.replace("""*""" , snake_case__ )
if "weight_g" in name:
A_ : str = """weight_g"""
elif "weight_v" in name:
A_ : Union[str, Any] = """weight_v"""
elif "bias" in name:
A_ : str = """bias"""
elif "weight" in name:
A_ : Optional[Any] = """weight"""
else:
A_ : Tuple = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
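# How the "*" entries in MAPPING are resolved above: the fairseq layer index is
# recovered from the weight name and substituted into the Hugging Face key.
# Hedged mini-example (the names below are illustrative, not real checkpoint keys):
name = "encoder.layers.3.self_attn.k_proj.weight"
key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
assert mapped_key.replace("*", layer_index) == "encoder.layers.3.attention.k_proj"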
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
A_ : Dict = full_name.split("""conv_layers.""" )[-1]
A_ : Any = name.split(""".""" )
A_ : Optional[int] = int(items[0] )
A_ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A_ : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A_ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A_ : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A_ : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case__ )
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
A_ : Optional[Any] = full_name.split("""adaptor.""" )[-1]
A_ : Dict = name.split(""".""" )
if items[1].isdigit():
A_ : Optional[int] = int(items[1] )
else:
A_ : Optional[Any] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
A_ : List[Any] = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
A_ : Optional[Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
A_ : Optional[int] = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
A_ : Optional[int] = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(snake_case__ , snake_case__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
A_ : Optional[Any] = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
A_ : Tuple = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case__ )
def __UpperCamelCase ( snake_case__ ):
A_ , A_ : Optional[Any] = emb.weight.shape
A_ : int = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A_ : Optional[int] = emb.weight.data
return lin_layer
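# The helper above turns the decoder embedding into a bias-free output
# projection that shares the embedding weights, so logits = hidden_states @
# emb.weight.T. Hedged toy check of that construction:
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data  # tie the weights
x = torch.randn(2, 4)
assert torch.allclose(lin(x), x @ emb.weight.T)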
@torch.no_grad()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
A_ : Any = WavaVecaConfig.from_pretrained(
snake_case__ , add_adapter=snake_case__ , adapter_stride=snake_case__ , adapter_kernel_size=snake_case__ , use_auth_token=snake_case__ , output_hidden_size=snake_case__ , )
A_ : Optional[Any] = MBartConfig.from_pretrained(snake_case__ )
# load model
A_ , A_ , A_ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
A_ : Optional[Any] = model[0].eval()
# load feature extractor
A_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(snake_case__ , use_auth_token=snake_case__ )
# set weights for wav2vec2 encoder
A_ : List[str] = WavaVecaModel(snake_case__ )
recursively_load_weights_wavaveca(model.encoder , snake_case__ )
# load decoder weights
A_ : List[Any] = MBartForCausalLM(snake_case__ )
A_ , A_ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
A_ : Optional[Any] = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
A_ : Any = False
A_ : Union[str, Any] = MBartaaTokenizer(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
A_ : Optional[Any] = hf_wavavec.config.to_dict()
A_ : Optional[Any] = tokenizer.pad_token_id
A_ : int = tokenizer.bos_token_id
A_ : List[Any] = tokenizer.eos_token_id
A_ : str = """mbart50"""
A_ : Any = """wav2vec2"""
A_ : List[str] = tokenizer.eos_token_id
A_ : Dict = 250_004
A_ : Tuple = tokenizer.eos_token_id
A_ : List[str] = SpeechEncoderDecoderConfig.from_dict(snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 180 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[Any] = """informer"""
_A : Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__(self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "student_t" , lowerCAmelCase_ = "nll" , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = "mean" , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = True , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.05 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 100 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_=True , lowerCAmelCase_ = "prob" , lowerCAmelCase_ = 5 , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
# time series specific configuration
A_ : Optional[Any] = prediction_length
A_ : Dict = context_length or prediction_length
A_ : Dict = distribution_output
A_ : Tuple = loss
A_ : Dict = input_size
A_ : Union[str, Any] = num_time_features
A_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A_ : Optional[int] = scaling
A_ : Optional[Any] = num_dynamic_real_features
A_ : Tuple = num_static_real_features
A_ : Tuple = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A_ : List[str] = cardinality
else:
A_ : List[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A_ : int = embedding_dimension
else:
A_ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
A_ : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : Dict = d_model
A_ : Dict = encoder_attention_heads
A_ : Dict = decoder_attention_heads
A_ : Any = encoder_ffn_dim
A_ : Tuple = decoder_ffn_dim
A_ : Tuple = encoder_layers
A_ : Optional[int] = decoder_layers
A_ : List[str] = dropout
A_ : List[str] = attention_dropout
A_ : Any = activation_dropout
A_ : Any = encoder_layerdrop
A_ : List[Any] = decoder_layerdrop
A_ : str = activation_function
A_ : Optional[Any] = init_std
A_ : Optional[int] = use_cache
# Informer
A_ : Dict = attention_type
A_ : List[Any] = sampling_factor
A_ : List[Any] = distil
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowerCamelCase(self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
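# Hedged usage sketch (assuming this class is transformers' InformerConfig, as
# the checkpoint map above suggests). With the defaults of input_size=1, seven
# lags and no categorical/real/time features, the property above contributes
# only the two log-scale features, so the encoder input dimension is
# 1 * 7 + 2 = 9:
from transformers import InformerConfig

cfg = InformerConfig(prediction_length=24)
assert cfg.feature_size == 1 * len(cfg.lags_sequence) + 2  # == 9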
| 180 | 1 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 1_00 ):
"""simple docstring"""
_a = (n * (n + 1) // 2) ** 2
_a = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
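# The identities used above are the standard closed forms
#     1 + 2 + ... + n       = n(n + 1) / 2      (squared for the first term)
#     1^2 + 2^2 + ... + n^2 = n(n + 1)(2n + 1) / 6
# Brute-force cross-check of the function for small n:
for n in range(1, 100):
    assert A_(n) == sum(range(1, n + 1)) ** 2 - sum(k * k for k in range(1, n + 1))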
if __name__ == "__main__":
    print(f'{solution() = }')
| 285 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Any=False ):
"""simple docstring"""
_a = '''backbone.''' if is_semantic else ''''''
_a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', '''beit.embeddings.cls_token'''),
(f'{prefix}patch_embed.proj.weight', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'{prefix}patch_embed.proj.bias', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'{prefix}pos_embed', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Any=False, _lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_a = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
_a = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
_a = in_proj_weight[
: config.hidden_size, :
]
_a = q_bias
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
_a = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
_a = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
_a = gamma_a
_a = gamma_a
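# Shape bookkeeping for the fused qkv split above: the source checkpoint stores
# one (3 * hidden, hidden) matrix; rows [0:h] are the queries, [h:2h] the keys
# and [2h:3h] (equivalently [-h:]) the values. Hedged toy check:
import torch

h = 4
qkv = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
q, k, v = qkv[:h], qkv[h : 2 * h], qkv[-h:]
assert torch.equal(torch.cat([q, k, v]), qkv)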
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any, _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = dct.pop(_lowerCAmelCase )
_a = val
def A_ ( ):
"""simple docstring"""
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_a = False if '''rvlcdip''' in checkpoint_url else True
_a = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase, use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_a = 10_24
_a = 40_96
_a = 24
_a = 16
# labels
if "rvlcdip" in checkpoint_url:
_a = 16
_a = '''huggingface/label-files'''
_a = '''rvlcdip-id2label.json'''
_a = json.load(open(hf_hub_download(_lowerCAmelCase, _lowerCAmelCase, repo_type='''dataset''' ), '''r''' ) )
_a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_a = torch.hub.load_state_dict_from_url(_lowerCAmelCase, map_location='''cpu''' )['''model''']
_a = create_rename_keys(_lowerCAmelCase, has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase, _lowerCAmelCase, has_lm_head=_lowerCAmelCase )
# load HuggingFace model
_a = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
_a = BeitImageProcessor(
size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=_lowerCAmelCase )
_a = prepare_img()
_a = image_processor(images=_lowerCAmelCase, return_tensors='''pt''' )
_a = encoding['''pixel_values''']
_a = model(_lowerCAmelCase )
_a = outputs.logits
# verify logits
_a = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
_a = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
_a = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase, _lowerCAmelCase ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=_lowerCAmelCase, )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase, _lowerCAmelCase ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=_lowerCAmelCase, )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
__snake_case = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 285 | 1 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
SCREAMING_SNAKE_CASE : Union[str, Any] = namedtuple("covid_data", "cases deaths recovered")
def _UpperCamelCase ( lowerCAmelCase__: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
SCREAMING_SNAKE_CASE_ = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(lowerCAmelCase__ ).content ).xpath(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
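# The XPath above collects the three "maincounter-number" spans in page order
# (cases, deaths, recovered) and unpacks them positionally into the namedtuple.
# Offline illustration of that unpacking (the figures are made up):
sample = covid_data("1,000", "10", "900")
assert sample.deaths == "10" and sample.recovered == "900"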
| 294 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger()
def _UpperCamelCase ( lowerCAmelCase__: int ,lowerCAmelCase__: str ,lowerCAmelCase__: LevitConfig ,lowerCAmelCase__: Path ,lowerCAmelCase__: bool = True ) -> Optional[int]:
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
SCREAMING_SNAKE_CASE_ = timm.create_model('levit_128s' ,pretrained=lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ = timm.create_model('levit_128' ,pretrained=lowerCAmelCase__ )
if hidden_sizes == 192:
SCREAMING_SNAKE_CASE_ = timm.create_model('levit_192' ,pretrained=lowerCAmelCase__ )
if hidden_sizes == 256:
SCREAMING_SNAKE_CASE_ = timm.create_model('levit_256' ,pretrained=lowerCAmelCase__ )
if hidden_sizes == 384:
SCREAMING_SNAKE_CASE_ = timm.create_model('levit_384' ,pretrained=lowerCAmelCase__ )
from_model.eval()
SCREAMING_SNAKE_CASE_ = LevitForImageClassificationWithTeacher(lowerCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE_ = OrderedDict()
SCREAMING_SNAKE_CASE_ = from_model.state_dict()
SCREAMING_SNAKE_CASE_ = list(from_model.state_dict().keys() )
SCREAMING_SNAKE_CASE_ = list(our_model.state_dict().keys() )
print(len(lowerCAmelCase__ ) ,len(lowerCAmelCase__ ) )
for i in range(len(lowerCAmelCase__ ) ):
SCREAMING_SNAKE_CASE_ = weights[og_keys[i]]
our_model.load_state_dict(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = torch.randn((2, 3, 224, 224) )
SCREAMING_SNAKE_CASE_ = from_model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = our_model(lowerCAmelCase__ ).logits
assert torch.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE_ = name
print(lowerCAmelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
SCREAMING_SNAKE_CASE_ = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
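# The weight transfer above is purely positional: the i-th timm key is copied
# onto the i-th Hugging Face key, which only works because both state dicts
# enumerate parameters in the same order. Hedged mini-illustration of the pattern:
src = {"a.w": 1, "a.b": 2}
dst_keys = ["x.weight", "x.bias"]
new_state = {dst: src[og] for og, dst in zip(src, dst_keys)}
assert new_state == {"x.weight": 1, "x.bias": 2}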
def _UpperCamelCase ( lowerCAmelCase__: Path ,lowerCAmelCase__: str = None ,lowerCAmelCase__: bool = True ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ = 1000
SCREAMING_SNAKE_CASE_ = (1, num_labels)
SCREAMING_SNAKE_CASE_ = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='dataset' ) ,'r' ) )
SCREAMING_SNAKE_CASE_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = partial(lowerCAmelCase__ ,num_labels=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
SCREAMING_SNAKE_CASE_ = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,lowerCAmelCase__ ,names_to_config[model_name] ,lowerCAmelCase__ ,lowerCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 294 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( _lowerCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =MgpstrTokenizer
UpperCamelCase__ =False
UpperCamelCase__ ={}
UpperCamelCase__ =False
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
super().setUp()
# fmt: off
__magic_name__ : Optional[int] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__magic_name__ : Dict = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
__magic_name__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
def UpperCAmelCase__ ( self : Union[str, Any] , **lowerCamelCase_ : List[str] ) -> Tuple:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCAmelCase__ ( self : int , lowerCamelCase_ : List[Any] ) -> Any:
__magic_name__ : str = '''tester'''
__magic_name__ : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__ : int = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : Dict = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__magic_name__ : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
__magic_name__ : List[Any] = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self : Dict ) -> str:
__magic_name__ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ , __magic_name__ : int = self.get_input_output_texts(lowerCamelCase_ )
__magic_name__ : int = tokenizer.tokenize(lowerCamelCase_ )
__magic_name__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
__magic_name__ : Optional[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : Tuple = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertNotEqual(len(lowerCamelCase_ ) , 0 )
__magic_name__ : List[Any] = tokenizer.decode(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , lowerCamelCase_ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
pass
| 501 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases to raise an error on call."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments.

    If the formatter for a given type name doesn't exist or is not available, an error is raised.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
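# Standalone toy version of the registry pattern above: formatters register under a
# canonical name plus aliases, and unavailable backends register an error that is only
# raised when the type is actually requested. Names here are illustrative, not part of
# `datasets`.
_demo_types = {}
_demo_aliases = {}


def _demo_register(cls, name, aliases=()):
    _demo_types[name] = cls
    for alias in set(list(aliases) + [name]):
        _demo_aliases[alias] = name


_demo_register(dict, "mapping", aliases=["map"])
assert _demo_types[_demo_aliases["map"]] is dict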
| 501 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job: each job gets a contiguous chunk of shards."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, distributing the list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate list values across a list of gen_kwargs; non-list values come from the first one."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # We must shuffle all the lists, and lists of the same size must have the same shuffling.
    # This way entangled lists of (shard, shard_metadata) are still in the right order.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
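# Quick sanity check of the helpers above: 5 shards over 2 jobs gives the first job the
# extra shard, and list-valued gen_kwargs are split shard-wise while scalars are copied.
assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _split_gen_kwargs({"shards": [0, 1, 2], "seed": 42}, max_num_jobs=2) == [
    {"shards": [0, 1], "seed": 42},
    {"shards": [2], "seed": 42},
]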
| 5 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A( self):
__UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**lowercase__).images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : List[str] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A( self):
__UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : Union[str, Any] = pipe(**lowercase__).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A( self):
__UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**lowercase__).images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : int = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A( self):
__UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Dict = self.get_dummy_inputs()
__UpperCAmelCase : Optional[int] = pipe(**lowercase__).images
__UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Dict = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A( self):
__UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**lowercase__).images
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__UpperCAmelCase : Dict = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def A( self):
# using the PNDM scheduler by default
__UpperCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : int = '''A painting of a squirrel eating a burger'''
np.random.seed(0)
__UpperCAmelCase : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='''np''')
__UpperCAmelCase : Dict = output.images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase : List[str] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def A( self):
__UpperCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
__UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Any = '''open neural network exchange'''
__UpperCAmelCase : Any = np.random.RandomState(0)
__UpperCAmelCase : List[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase__ , output_type='''np''')
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase : int = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def A( self):
__UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
__UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Optional[Any] = '''open neural network exchange'''
__UpperCAmelCase : Optional[int] = np.random.RandomState(0)
__UpperCAmelCase : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase__ , output_type='''np''')
__UpperCAmelCase : Union[str, Any] = output.images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase : Optional[int] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def A( self):
__UpperCAmelCase : Tuple = 0
def test_callback_fn(lowercase__ , lowercase__ , lowercase__) -> None:
__UpperCAmelCase : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
__UpperCAmelCase : Optional[Any] = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
__UpperCAmelCase : Dict = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
__UpperCAmelCase : str = False
__UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Union[str, Any] = '''Andromeda galaxy in a bottle'''
__UpperCAmelCase : List[Any] = np.random.RandomState(0)
pipe(
prompt=lowercase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase__ , callback=lowercase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def A( self):
__UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowercase__ , lowercase__)
assert pipe.safety_checker is None
__UpperCAmelCase : Union[str, Any] = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__)
__UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(lowercase__)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCAmelCase : Optional[int] = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
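# Self-contained illustration of the slice-comparison pattern the tests above rely on:
# take a small corner slice of the output image and compare it with a stored reference
# within a tolerance. The arrays here are synthetic, purely for demonstration.
_demo_image = np.random.RandomState(0).rand(1, 128, 128, 3)
_demo_slice = _demo_image[0, -3:, -3:, -1]          # 3x3 corner of the last channel
_demo_reference = _demo_slice.flatten().copy()      # stands in for a hard-coded expected_slice
assert np.abs(_demo_slice.flatten() - _demo_reference).max() < 1e-2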
| 462 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__a : Optional[int] = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
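# Minimal usage sketch for `git_log` above (illustrative only: `git.Repo` requires
# running from inside a git checkout, so the call is wrapped in a helper rather than
# executed at import time).
import tempfile


def _demo_git_log():
    with tempfile.TemporaryDirectory() as tmp_dir:
        git_log(tmp_dir)  # writes {tmp_dir}/git_log.json with repo id, sha and branch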
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a : int = logging.get_logger(__name__)
__a : Tuple = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a : Optional[Any] = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__a : Any = {'''facebook/blenderbot_small-90M''': 5_1_2}
def get_pairs(word):
    """Return the set of symbol pairs in a word; a word is represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
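# Quick illustration of `get_pairs` above: it yields the set of adjacent symbol pairs
# of a BPE "word", i.e. the candidates for the next merge.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}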
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            word = token.lower()
            word = tuple(word)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(word)
                continue

            while True:
                # merge the lowest-ranked bigram present in the word first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 298 | 0 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_snake_case = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    # mock out all download functions
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
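# Standalone illustration of the URL -> dummy-file-path mapping used by the class above:
# the last path segment is percent-encoded and joined under the dummy data directory.
# (Pure-stdlib sketch; the names are illustrative.)
_demo_url = "https://example.com/data/train.txt?shard=1"
_demo_path = os.path.join("dummy_data", urllib.parse.quote_plus(_demo_url.split("/")[-1]))
assert _demo_path == os.path.join("dummy_data", "train.txt%3Fshard%3D1")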
| 389 |
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
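    # The graph above is acyclic and the DFS emits leaves first, so the result is
    # deterministic and can be checked directly.
    assert sort == ["c", "d", "e", "b", "a"]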
| 389 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
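# Minimal standalone sketch of the lazy-module idea used above: attribute access triggers
# the real import, so importing the package itself stays cheap. This is a generic
# illustration built on importlib, not the actual `_LazyModule` implementation.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._demo_import_structure = import_structure

    def __getattr__(self, attr):
        # resolve the attribute by importing the submodule that declares it
        for module_name, attrs in self._demo_import_structure.items():
            if attr in attrs:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)


assert _LazyDemo("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'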
| 526 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''')
def snake_case_ ( self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
def snake_case_ ( self):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
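# Standalone check of the `hashimage` helper above: identical pixel data hashes to the
# same digest. The image is synthetic; guarded so it only runs when Pillow is available.
if is_vision_available():
    _demo_img = Image.new("L", (4, 4), color=128)
    assert hashimage(_demo_img) == hashimage(_demo_img.copy())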
| 526 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
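# Hedged sketch of what a concrete reader looks like against the abstract base above.
# The class below is illustrative only (it is not part of `datasets`); real readers such
# as the JSON/CSV ones also honor cache_dir, keep_in_memory, streaming and num_proc.
class _ListReader(AbstractDatasetReader):
    def read(self):
        # Treat `path_or_paths` as an in-memory list of text rows for demonstration.
        return Dataset.from_dict({"text": list(self.path_or_paths)})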
| 435 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = KandinskyVaaPriorPipeline
snake_case_ = ["""prompt"""]
snake_case_ = ["""prompt""", """negative_prompt"""]
snake_case_ = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
snake_case_ = False
@property
def __magic_name__ ( self : Optional[Any] ) -> Any:
return 32
@property
def __magic_name__ ( self : int ) -> Dict:
return 32
@property
def __magic_name__ ( self : Tuple ) -> Optional[int]:
return self.time_input_dim
@property
def __magic_name__ ( self : str ) -> Any:
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Optional[Any] ) -> str:
return 1_00
@property
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __magic_name__ ( self : int ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def __magic_name__ ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
@property
def __magic_name__ ( self : Optional[int] ) -> List[str]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =CLIPVisionModelWithProjection(__lowercase )
return model
@property
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =CLIPImageProcessor(
crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
def __magic_name__ ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any =self.dummy_prior
SCREAMING_SNAKE_CASE__ : str =self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Any =self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] =UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=__lowercase , clip_sample_range=10.0 , )
SCREAMING_SNAKE_CASE__ : str ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __magic_name__ ( self : Dict , __lowercase : Dict , __lowercase : Any=0 ) -> int:
if str(__lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Tuple =torch.manual_seed(__lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] ={
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : str ='''cpu'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : int =pipe(**self.get_dummy_inputs(__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =output.image_embeds
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : Any =image[0, -10:]
SCREAMING_SNAKE_CASE__ : Optional[int] =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
SCREAMING_SNAKE_CASE__ : List[Any] =np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
@skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 296 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase__ = {
'''allenai/led-base-16384''': 1_63_84,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
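# Standalone illustration of the `global_attention_mask` padding rule implemented in
# `_pad` above: `-1` marks padded positions (plain local attention), appended on the
# padding side so the mask keeps the same length as `input_ids`.
_demo_mask = [1, 0, 0]                                   # 1 = global attention on token 0
_demo_difference = 2                                     # positions added by padding
assert _demo_mask + [-1] * _demo_difference == [1, 0, 0, -1, -1]   # right padding
assert [-1] * _demo_difference + _demo_mask == [-1, -1, 1, 0, 0]   # left padding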
| 226 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ (unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = AutoConfig.from_pretrained("gpt2" )
lowerCAmelCase_ = GenerationConfig.from_model_config(_a )
lowerCAmelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a , _a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = GenerationConfig()
lowerCAmelCase_ = {
"max_new_tokens": 1024,
"foo": "bar",
}
lowerCAmelCase_ = copy.deepcopy(_a )
lowerCAmelCase_ = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a , _a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a , {"foo": "bar"} )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = GenerationConfig()
lowerCAmelCase_ = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(_a )
lowerCAmelCase_ = GenerationConfig.from_pretrained(_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
lowerCAmelCase_ = GenerationConfig.from_model_config(_a )
assert not hasattr(_a , "foo" ) # no new kwargs should be initialized if from config
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
lowerCAmelCase_ = GenerationConfig.from_pretrained(_a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ (unittest.TestCase ):
@classmethod
def __a ( cls ) -> Optional[Any]:
lowerCAmelCase_ = TOKEN
HfFolder.save_token(_a )
@classmethod
def __a ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id="test-generation-config" , push_to_hub=_a , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id="valid_org/test-generation-config-org" , push_to_hub=_a , use_auth_token=self._token )
lowerCAmelCase_ = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
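    # --- illustrative sketch (not part of the original tests) ---
    # Both hub tests above exercise the same contract as the local round-trip
    # test: every explicitly set field survives save -> load. A minimal
    # offline version of that contract looks like:
    #
    #   cfg = GenerationConfig(do_sample=True, temperature=0.7)
    #   with tempfile.TemporaryDirectory() as tmp_dir:
    #       cfg.save_pretrained(tmp_dir)
    #       assert GenerationConfig.from_pretrained(tmp_dir).temperature == 0.7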
| 226 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
_SCREAMING_SNAKE_CASE = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
_SCREAMING_SNAKE_CASE = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
_SCREAMING_SNAKE_CASE = reader.read()
_SCREAMING_SNAKE_CASE = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
        _SCREAMING_SNAKE_CASE = UNet2DModel(**config)
else:
        _SCREAMING_SNAKE_CASE = UNet2DConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNet2DModel
_SCREAMING_SNAKE_CASE = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_SCREAMING_SNAKE_CASE = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_SCREAMING_SNAKE_CASE = config[key]
del config[key]
_SCREAMING_SNAKE_CASE = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
_SCREAMING_SNAKE_CASE = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
_SCREAMING_SNAKE_CASE = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
_SCREAMING_SNAKE_CASE = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
_SCREAMING_SNAKE_CASE = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
_SCREAMING_SNAKE_CASE = param_value
_SCREAMING_SNAKE_CASE = True
if not has_changed:
_SCREAMING_SNAKE_CASE = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
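    # --- illustrative note (not part of the original script) ---
    # The weight-renaming loop above matches only on the *first* dotted
    # component of a parameter name (`param_key.split(".")[0] == key`), so in
    # the upstream diffusers script this mirrors, key_parameters_to_change =
    # {"mid": "mid_block"} has the effect of, e.g.:
    #   "mid.attentions.0.bias" -> "mid_block.attentions.0.bias"
    #   "conv_in.weight"        -> "conv_in.weight"  (copied through unchanged)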
| 537 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class a ( __UpperCAmelCase ):
lowercase_ : Any = 'roberta-prelayernorm'
def __init__( self : Any , snake_case__ : Any=50_265 , snake_case__ : Any=768 , snake_case__ : Optional[Any]=12 , snake_case__ : Tuple=12 , snake_case__ : Union[str, Any]=3_072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=512 , snake_case__ : Any=2 , snake_case__ : List[Any]=0.0_2 , snake_case__ : Tuple=1E-12 , snake_case__ : List[str]=1 , snake_case__ : Optional[int]=0 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]="absolute" , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=None , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class a ( __UpperCAmelCase ):
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 611 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
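    # --- worked example (not part of the original class) ---
    # For the quadratic demo below, BezierCurve([(0, 0), (5, 5), (5, 0)]):
    # at t = 0.5 the Bernstein weights are comb(2, i) * 0.5**2 = (0.25, 0.5, 0.25),
    # so bezier_curve_function(0.5) returns
    # (0.25*0 + 0.5*5 + 0.25*5, 0.25*0 + 0.5*5 + 0.25*0) = (3.75, 2.5).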
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 704 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
__SCREAMING_SNAKE_CASE : int = 'bert-base-cased'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'fp16'
__SCREAMING_SNAKE_CASE : str = 'bf16'
__SCREAMING_SNAKE_CASE : Optional[int] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Optional[int] = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowercase_ ):
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = f"""{i + 1}"""
_snake_case : int = strategy
with mockenv_context(**lowercase_ ):
_snake_case : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowercase_ ):
_snake_case : List[str] = self.dist_env.copy()
_snake_case : List[Any] = prefetch_policy
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowercase_ ):
_snake_case : str = self.dist_env.copy()
_snake_case : List[str] = state_dict_type
with mockenv_context(**lowercase_ ):
_snake_case : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def UpperCamelCase ( self ):
_snake_case : Tuple = AutoModel.from_pretrained(lowercase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
_snake_case : Optional[Any] = self.dist_env.copy()
_snake_case : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_snake_case : List[str] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_snake_case : str = "2000"
with mockenv_context(**lowercase_ ):
_snake_case : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_snake_case : str = self.dist_env.copy()
_snake_case : Tuple = "TRANSFORMER_BASED_WRAP"
_snake_case : Union[str, Any] = "T5Layer"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(lowercase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_snake_case : str = self.dist_env.copy()
_snake_case : Any = "SIZE_BASED_WRAP"
_snake_case : str = "0"
with mockenv_context(**lowercase_ ):
_snake_case : Optional[int] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_snake_case : Union[str, Any] = self.dist_env.copy()
_snake_case : int = mp_dtype
with mockenv_context(**lowercase_ ):
_snake_case : str = Accelerator()
if mp_dtype == "fp16":
                    _snake_case : List[str] = torch.float16
elif mp_dtype == "bf16":
                    _snake_case : Any = torch.bfloat16
_snake_case : Dict = MixedPrecision(param_dtype=lowercase_ , reduce_dtype=lowercase_ , buffer_dtype=lowercase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowercase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowercase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowercase_ )
def UpperCamelCase ( self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_snake_case : Union[str, Any] = self.dist_env.copy()
_snake_case : Tuple = str(lowercase_ ).lower()
with mockenv_context(**lowercase_ ):
_snake_case : Dict = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowercase_ ) )
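    # --- illustrative sketch (not part of the original tests) ---
    # Each test above follows the same pattern: mutate a copy of the mocked
    # launcher environment, e.g.
    #
    #   env = self.dist_env.copy()
    #   env["FSDP_SHARDING_STRATEGY"] = "1"   # assumed variable name
    #   with mockenv_context(**env):
    #       plugin = FullyShardedDataParallelPlugin()
    #
    # and then assert that the plugin picked up the matching torch FSDP enum.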
@require_fsdp
@require_multi_gpu
@slow
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Dict = 0.82
_snake_case : str = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
_snake_case : Tuple = {
"multi_gpu_fp16": 3_200,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_000,
"fsdp_full_shard_transformer_based_wrap_fp16": 1_900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_snake_case : Tuple = 160
_snake_case : Optional[int] = 160
_snake_case : Optional[Any] = inspect.getfile(accelerate.test_utils )
_snake_case : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = os.path.join(self.test_scripts_folder , "test_performance.py" )
_snake_case : int = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
_snake_case : str = cmd.copy()
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def UpperCamelCase ( self ):
_snake_case : Tuple = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
_snake_case : str = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(lowercase_ ):
_snake_case : str = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_snake_case : int = len(lowercase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_snake_case : int = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
_snake_case : Union[str, Any] = cmd_config[:-1]
_snake_case : Dict = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def UpperCamelCase ( self ):
_snake_case : List[Any] = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
_snake_case : Any = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_snake_case : Tuple = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(lowercase_ , env=os.environ.copy() )
| 580 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """open-llama"""
def __init__( self , A_=10_0000 , A_=4096 , A_=1_1008 , A_=32 , A_=32 , A_="silu" , A_=2048 , A_=0.02 , A_=1e-6 , A_=True , A_=0 , A_=1 , A_=2 , A_=False , A_=True , A_=0.1 , A_=0.1 , A_=True , A_=True , A_=None , **A_ , ) ->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = vocab_size
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : List[Any] = hidden_size
__lowerCAmelCase : Dict = intermediate_size
__lowerCAmelCase : Optional[Any] = num_hidden_layers
__lowerCAmelCase : Dict = num_attention_heads
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Union[str, Any] = rms_norm_eps
__lowerCAmelCase : Any = use_cache
__lowerCAmelCase : List[str] = kwargs.pop(
'''use_memorry_efficient_attention''' , A_ )
__lowerCAmelCase : Tuple = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_dropout_prob
__lowerCAmelCase : Union[str, Any] = use_stable_embedding
__lowerCAmelCase : Dict = shared_input_output_embedding
__lowerCAmelCase : Any = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ , )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__lowerCAmelCase : Optional[Any] = self.rope_scaling.get('''type''' , A_ )
__lowerCAmelCase : int = self.rope_scaling.get('''factor''' , A_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 492 |
def average_absolute_deviation(nums):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
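# worked example: for [1, 2, 3] the mean is 2 and the absolute deviations are
# (1, 0, 1), so the average absolute deviation is 2 / 3.
assert abs(average_absolute_deviation([1, 2, 3]) - 2 / 3) < 1e-9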
if __name__ == "__main__":
import doctest
doctest.testmod()
| 492 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
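# --- illustrative note (not part of the original script) ---
# pack_examples greedily concatenates adjacent (src, tgt) pairs until the
# tokenized candidate would exceed max_tokens. For instance, with a
# whitespace tokenizer and max_tokens=4, the pairs
# [("a b", "x"), ("c", "y"), ("d e f", "z")] pack into
# [("a b c", "x y"), ("d e f", "z")], because "a b c d e f" would be 6 tokens.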
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 566 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
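# PolynomialFeatures(degree=4) maps each position level x to the feature
# vector [1, x, x**2, x**3, x**4] (e.g. x = 2 -> [1, 2, 4, 8, 16]), so the
# "linear" regression above is really fitting a quartic in x.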
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 566 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase : str = logging.get_logger(__name__)
class _UpperCamelCase ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
_snake_case = ['''pixel_values''']
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = True , a_ = 1 / 2_5_5 , a_ = True , a_ = None , a_ = None , **a_ , ) -> None:
super().__init__(**a_ )
lowercase : Any = size if size is not None else {"shortest_edge": 2_5_6}
lowercase : Union[str, Any] = get_size_dict(a_ , default_to_square=a_ )
lowercase : List[str] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
lowercase : List[Any] = get_size_dict(a_ )
lowercase : List[str] = do_resize
lowercase : List[Any] = size
lowercase : List[Any] = resample
lowercase : List[Any] = do_center_crop
lowercase : Optional[Any] = crop_size
lowercase : Tuple = do_rescale
lowercase : Union[str, Any] = rescale_factor
lowercase : Tuple = do_normalize
lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , a_ , a_ , a_ = PILImageResampling.BICUBIC , a_ = None , **a_ , ) -> np.ndarray:
lowercase : Union[str, Any] = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase : int = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
lowercase : Optional[Any] = get_size_dict(a_ )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ = None , **a_ ) -> np.ndarray:
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ) -> Dict:
lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : List[str] = size if size is not None else self.size
lowercase : Optional[int] = get_size_dict(a_ , default_to_square=a_ )
lowercase : List[str] = resample if resample is not None else self.resample
lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Dict = crop_size if crop_size is not None else self.crop_size
lowercase : Any = get_size_dict(a_ )
lowercase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[str] = image_mean if image_mean is not None else self.image_mean
lowercase : str = image_std if image_std is not None else self.image_std
lowercase : str = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase : Any = [to_numpy_array(a_ ) for image in images]
if do_resize:
lowercase : List[Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
lowercase : List[Any] = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
lowercase : List[Any] = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
lowercase : List[Any] = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
lowercase : List[Any] = [to_channel_dimension_format(a_ , a_ ) for image in images]
lowercase : Dict = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
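    # --- illustrative note (not part of the original class) ---
    # With the defaults above (size={"shortest_edge": 256},
    # crop_size={"height": 224, "width": 224}), a 500x333 RGB image flows
    # through preprocess roughly as:
    #   resize      -> 384x256 (shortest edge scaled to 256, aspect ratio kept)
    #   center_crop -> 224x224
    #   rescale     -> same shape, pixel values scaled by 1/255
    #   normalize   -> same shape, (x - IMAGENET_STANDARD_MEAN) / IMAGENET_STANDARD_STD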
| 372 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda example, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 372 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__a : Optional[Any] = """
import os
"""
__a : Optional[Any] = """
def foo():
import os
return False
"""
__a : Dict = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
__a : Dict = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
__a : Union[str, Any] = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
__a : Tuple = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
__a : str = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
__a : Dict = """
import os
try:
import bar
except:
raise ValueError()
"""
__a : str = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
__a : Optional[Any] = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
__a : Union[str, Any] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 522 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
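# --- illustrative note (not part of the original script) ---
# make_linear_from_emb implements weight tying: the returned nn.Linear shares
# its weight tensor with the nn.Embedding, so the LM head scores each
# vocabulary token by a dot product with that token's embedding vector, and
# no separate output-projection matrix has to be stored.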
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 522 | 1 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)

            now_divide = now_divide * 10 % divide_by_number
    return the_digit
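# worked example: solution(1, 10) examines 1/1 .. 1/10; 1/7 has the longest
# recurring cycle (0.142857...), whose remainders run through the six values
# 1, 3, 2, 6, 4, 5 before repeating, so the function returns 7
# (this is Project Euler problem 26 when digit=1000).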
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 531 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 1_6
lowerCAmelCase_ = 3_2
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase = 16 ):
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('bert-base-cased' )
A__ = DatasetDict(
{
'train': dataset['train'].select(UpperCAmelCase ),
'validation': dataset['train'].select(UpperCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=UpperCAmelCase ,max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
UpperCAmelCase ,batched=UpperCAmelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 16
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
UpperCAmelCase ,padding='longest' ,max_length=UpperCAmelCase ,pad_to_multiple_of=UpperCAmelCase ,return_tensors='pt' ,)
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['train'] ,shuffle=UpperCAmelCase ,collate_fn=UpperCAmelCase ,batch_size=UpperCAmelCase )
A__ = DataLoader(
tokenized_datasets['validation'] ,shuffle=UpperCAmelCase ,collate_fn=UpperCAmelCase ,batch_size=UpperCAmelCase )
A__ = DataLoader(
tokenized_datasets['test'] ,shuffle=UpperCAmelCase ,collate_fn=UpperCAmelCase ,batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
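# --- illustrative note (not part of the original script) ---
# StratifiedKFold yields (train_idxs, valid_idxs) index pairs whose class
# balance mirrors the full label distribution, e.g. with labels [0, 0, 1, 1]
# and n_splits=2 each fold holds one 0 and one 1; `datasets['train'].select(...)`
# above then materializes those index lists as the per-fold train/validation splits.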
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
A__ = []
# Download the dataset
A__ = load_dataset('glue' ,'mrpc' )
# Create our splits
A__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = evaluate.load('glue' ,'mrpc' )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
set_seed(UpperCAmelCase )
# New Code #
# Create our folds:
A__ = kfold.split(np.zeros(datasets['train'].num_rows ) ,datasets['train']['label'] )
A__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(UpperCAmelCase ):
A__ , A__ , A__ = get_fold_dataloaders(
UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' ,return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() ,lr=UpperCAmelCase )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase ,num_warmup_steps=100 ,num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**UpperCAmelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCAmelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCAmelCase ,references=UpperCAmelCase ,)
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,UpperCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
A__ = []
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCAmelCase )
A__ = outputs.logits
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(UpperCAmelCase ,dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A__ = torch.cat(UpperCAmelCase ,dim=0 )
A__ = torch.stack(UpperCAmelCase ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A__ = metric.compute(predictions=UpperCAmelCase ,references=UpperCAmelCase )
accelerator.print('Average test metrics from all folds:' ,UpperCAmelCase )
def _A ( ):
'''simple docstring'''
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' ,type=UpperCAmelCase ,default=UpperCAmelCase ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' ,)
parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' ,type=UpperCAmelCase ,default=3 ,help='The number of splits to perform across the dataset' )
A__ = parser.parse_args()
A__ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase ,UpperCAmelCase )
if __name__ == "__main__":
main()
| 531 | 1 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])
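# worked example: solve(["2", "6", "+", "9", "*"]) evaluates (2 + 6) * 9:
# push 2, push 6, "+" pops 6 then 2 and pushes 8, push 9, "*" pops 9 then 8
# and pushes 72, so the function returns 72.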
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print('''\n\tResult = ''', solve(Postfix))
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
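# worked example: analyze_text("ab") gives single_char_strings = {"a": 1, "b": 1}
# and two_char_strings = {" a": 1, "ab": 1}; with both characters equally
# likely, the first-order entropy is -2 * (0.5 * log2(0.5)) = 1 bit.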
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
a = logging.get_logger(__name__)
class _A ( __lowercase ):
__a = ['input_features', 'attention_mask']
    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
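        # Illustrative arithmetic (editorial): with the defaults above,
        # sample_size = 25 * 16000 // 1000 = 400 samples per window,
        # sample_stride = 10 * 16000 // 1000 = 160 samples per hop,
        # n_fft is rounded up to the next power of two (512), and
        # n_freqs = 512 // 2 + 1 = 257 frequency bins.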
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extracts MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )

        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 518 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
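# Illustrative result (editorial): for "ast-finetuned-audioset-12-12-0.447" this
# returns an ASTConfig with time_stride = frequency_stride = 12 and
# num_labels = 527 (the AudioSet label set).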
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
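    # Illustrative invocation (editorial; the script filename is an assumption
    # based on the transformers conversion-script naming convention):
    # python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #     --model_name ast-finetuned-audioset-10-10-0.4593 \
    #     --pytorch_dump_folder_path ./ast-converted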
| 658 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
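    # Worked example (editorial): the failure array stores, for each prefix, the
    # length of the longest proper prefix that is also a suffix. For "aabaabaaa",
    # after matching "aabaabaa" (8 chars) a mismatch falls back to length 5
    # ("aabaa"), which is why failure[7] == 5 above.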
| 603 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 603 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
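# Illustrative use of the custom flag (editorial): inside a docstring,
#     >>> some_nondeterministic_call()  # doctest: +IGNORE_RESULT
# passes regardless of the printed value, because check_output() above
# short-circuits to True when IGNORE_RESULT is set.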
| 255 |
"""simple docstring"""
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"{len(emails)} emails found:")
print('''\n'''.join(sorted(emails)))
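    # Note (editorial): the regex above only matches addresses ending in the
    # crawled apex domain (e.g. ...@github.com). A more general pattern such as
    # r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}" would catch other hosts.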
| 255 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
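# Note (editorial): with do_reduce_labels=True, BeitImageProcessor maps the
# ADE20k background label 0 to 255 (ignored by the loss) and shifts every other
# label down by one, which is why the reduced labels in the tests below are
# checked against the 0..255 range instead of 0..150.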
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_segmentation_maps(self):
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
snake_case_ = []
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ = prepare_semantic_single_inputs()
snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ = prepare_semantic_batch_inputs()
snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 2 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
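# Examples (editorial): the input list must already be sorted in ascending order.
# two_pointer([2, 7, 11, 15], 9)  -> [0, 1]  (2 + 7 == 9)
# two_pointer([2, 7, 11, 15], 26) -> [2, 3]  (11 + 15 == 26)
# two_pointer([2, 7, 11, 15], 8)  -> []      (no pair sums to 8)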
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 2 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
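# Illustrative decoding sketch (editorial; assumes imwatermark's WatermarkDecoder
# API and a uint8 image array as input):
# from imwatermark import WatermarkDecoder
# decoder = WatermarkDecoder("bits", len(WATERMARK_BITS))
# recovered_bits = decoder.decode(image_uint8, "dwtDct")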
| 80 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
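# Note (editorial): this is a global threshold -- every pixel is binarized
# against the single mean intensity of the whole image. E.g. with a mean of 100,
# a pixel of value 130 becomes 255 and a pixel of value 70 becomes 0.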
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save('''output_image_path''')
| 268 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
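# Worked example (editorial): sum_of_digits(262144) peels off 4, 4, 1, 2, 6, 2
# (least-significant digit first) and returns 4 + 4 + 1 + 2 + 6 + 2 = 19; the
# recursive and str()-based variants return the same value.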
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 701 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
'''simple docstring'''
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )
    def get_config(self):
        return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
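# Worked example (editorial): with initial_learning_rate = 1e-4, warmup_steps = 100
# and power = 1.0, WarmUp returns 1e-4 * (50 / 100) ** 1.0 = 5e-5 at step 50, and
# delegates to decay_schedule_fn(step - warmup_steps) once step >= warmup_steps.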
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate" )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None
@property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )

        return self._accum_steps.value()
@property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
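# Illustrative usage sketch (editorial; variable names are assumptions):
# accumulator = GradientAccumulator()
# for micro_batch in micro_batches:
#     with tf.GradientTape() as tape:
#         loss = loss_fn(model(micro_batch))
#     accumulator(tape.gradient(loss, model.trainable_variables))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()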
| 350 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def _A( lowerCAmelCase=32 , lowerCAmelCase=10 , lowerCAmelCase=100 , lowerCAmelCase=1026 , lowerCAmelCase=True , lowerCAmelCase="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
A__ , A__ : str = generate_datasets(
lowerCAmelCase , lowerCAmelCase , number=lowerCAmelCase , min_len=1026 , trim=lowerCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
A__ : Optional[int] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
    A__ : List[str] = load_gpt2("""gpt2""" ).to(lowerCAmelCase )
print("""computing perplexity on objective set""" )
A__ : Any = compute_perplexity(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).item()
print("""perplexity on objective set:""" , lowerCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _A( lowerCAmelCase , lowerCAmelCase=15 , lowerCAmelCase=128 , lowerCAmelCase=100 , lowerCAmelCase="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
    A__ : Any = GPT2LMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
A__ : List[Any] = SecondaryLearner(lowerCAmelCase )
# Train secondary learner
A__ : Dict = train_secondary_learner(
lowerCAmelCase , lowerCAmelCase , max_epochs=lowerCAmelCase , batch_size=lowerCAmelCase , eval_freq=100 , igf_model_path=lowerCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=32 , lowerCAmelCase=1000 , lowerCAmelCase=16 , lowerCAmelCase=1.0 , lowerCAmelCase=recopy_gpt2 , lowerCAmelCase=None , lowerCAmelCase=10 , lowerCAmelCase="gpt2_finetuned.pt" , ):
A__ : List[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
A__ : Optional[Any] = RandomSampler(lowerCAmelCase )
A__ : Dict = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase )
A__ : Tuple = max_steps // (len(lowerCAmelCase )) + 1
A__ : int = 0
A__ : Optional[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCAmelCase )
A__ , A__ , A__ : str = recopy_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCAmelCase )
secondary_learner.eval()
A__ : List[str] = []
A__ : Optional[Any] = 0
A__ : Union[str, Any] = []
A__ : List[Any] = []
# Compute the performance of the transformer model at the beginning
A__ : str = compute_perplexity(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
test_perps.append(lowerCAmelCase )
print("""Test perplexity, step""" , lowerCAmelCase , """:""" , lowerCAmelCase )
for epoch in range(int(lowerCAmelCase ) ):
for step, example in enumerate(lowerCAmelCase ):
torch.cuda.empty_cache()
A__ : str = random.randint(0 , example.size(2 ) - context_len - 1 )
A__ : Union[str, Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
A__ : Dict = model(lowerCAmelCase , labels=lowerCAmelCase )
A__ : Union[str, Any] = True
if secondary_learner is not None:
A__ : Optional[Any] = secondary_learner.forward(
torch.tensor(lowerCAmelCase , dtype=torch.long , device=lowerCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowerCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
A__ : Dict = -1
if predicted_q < threshold:
A__ : Tuple = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
A__ : List[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
A__ : List[str] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
A__ : Any = compute_perplexity(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
test_perps.append(lowerCAmelCase )
print("""Test perplexity, step""" , lowerCAmelCase , """:""" , lowerCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowerCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
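
# Editor's sketch (not part of the original script): the decaying-selectivity
# schedule described in the comments above, factored into a helper for clarity.
# It assumes `np` (numpy) is imported at the top of this file, as it is used
# elsewhere here, and linearly decays the filter cutoff from one standard
# deviation above the mean of the observed IG values to one below it.
def decayed_threshold(observed_qs, global_step, decay_steps=10):
    qs = np.asarray(observed_qs, dtype=np.float64)
    frac = min(global_step / decay_steps, 1.0)  # 0 -> 1 over the first decay_steps batches
    return qs.mean() + (1.0 - 2.0 * frac) * qs.std()
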
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
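
# Example invocation (the script name and paths are illustrative, not taken from
# this file; the .jbl inputs must be produced by the tokenization step referenced
# in the --data_file help text):
#
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 \
#       --output_dir ./igf_out --context_len 32 --max_steps 1000 --batch_size 16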
| 363 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_UpperCamelCase = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_UpperCamelCase = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 363 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 713 |
def UpperCAmelCase__ ( lowercase__ = 100 ) -> int:
__lowercase = n * (n + 1) * (2 * n + 1) / 6
__lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
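
# Worked example (Project Euler problem 6): for n = 10 the sum 1 + ... + 10 = 55
# squares to 3025, while 1^2 + ... + 10^2 = 385, so the difference is 2640.
assert solution(10) == 2640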
| 634 | 0 |
'''simple docstring'''
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple of ints for a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Join a (major, minor, patch) tuple back into a dotted version string."""
    return ".".join(str(v) for v in version_tuple)
| 199 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EnCodec neural audio codec model."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)
    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        # e.g. the default ratios [8, 5, 4, 2] give hop_length = 320, so 24000 Hz audio maps to
        # ceil(24000 / 320) = 75 frames per second
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 436 | 0 |
"""simple docstring"""
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) pair of the given letter in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based position (index1, index2) of the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode a message: row coordinates are written above column coordinates, the two rows
        are flattened, and consecutive coordinate pairs are mapped back to letters."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 5x5 square merges i and j

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message by inverting the coordinate transposition performed by `encode`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
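
# Round-trip sketch. The 5x5 square merges "i" and "j" (so "j" is lossy) and
# spaces are stripped before encoding; any j-free, space-free lowercase text
# decodes back exactly.
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"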
| 707 |
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    # All inputs must be positive for Graham's law to apply.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
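
# Worked example with hydrogen (2.016 g/mol) and helium (4.002 g/mol), using the
# function names reconstructed above: Graham's law gives
# rate_H2 / rate_He = sqrt(4.002 / 2.016) ~= 1.408943, i.e. hydrogen effuses
# roughly 1.41x faster than helium.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 4.002))  # 1.408943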
| 401 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__magic_name__ = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
__magic_name__ = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
__magic_name__ = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    """Calculate intersection and union areas for a single (prediction, ground truth) pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    """Accumulate intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num=None,
    label_map=None,
    reduce_labels=False,
):
    """Compute per-category and mean IoU/accuracy over all segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 129 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set the components of a module as attributes, so they can be patched individually."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
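
# Usage sketch (hedged: assumes the context-manager / start-stop API
# reconstructed above). Given a module object `mod` that did `import os`,
# temporarily replace the `os.path.join` it sees:
#
#     with patch_submodule(mod, "os.path.join", lambda *p: "/".join(p)):
#         ...  # mod's view of os.path.join is patched here
#     # on exit the original attribute is restored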
| 129 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=A , )
assert hasattr(self , """env""" )
def _lowercase( self , A ) -> str:
UpperCAmelCase : Any = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
# distributed data settings
UpperCAmelCase : Tuple = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A , py_version="""py36""" , )
def _lowercase( self , A ) -> Any:
TrainingJobAnalytics(A ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def _lowercase( self , A ) -> str:
# create estimator
UpperCAmelCase : Union[str, Any] = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A )
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline using a sequence-classification model trained on NLI tasks."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Parse arguments and tokenize `only_first` so that the hypothesis (label) is not truncated."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
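
# Typical end-user invocation of this pipeline (the checkpoint is illustrative;
# any NLI model whose config maps an "entailment" label works):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#     )
#     print(result["labels"][0], result["scores"][0])  # highest-scoring label first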
| 672 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[int]:
# Initialise PyTorch model
lowerCAmelCase_ = LxmertConfig.from_json_file(a_ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCAmelCase_ = LxmertForPreTraining(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(a_ , a_ , a_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
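
# Example invocation (script name and checkpoint paths are illustrative):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch.bin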
| 318 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=6_4 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
lowerCAmelCase_ = vocab_size - 1
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ = True
return config, input_ids, input_mask, token_labels
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = GPTNeoXModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
lowerCAmelCase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = GPTNeoXModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = GPTNeoXForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = GPTNeoXForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = GPTNeoXForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , output_hidden_states=lowercase_ )
lowerCAmelCase_ = output_from_no_past['hidden_states'][0]
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: str = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__a: Optional[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__a: List[str] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__a: Any = False
__a: Tuple = False
__a: List[Any] = False
__a: Optional[int] = False
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = GPTNeoXModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ , hidden_size=6_4 , num_attention_heads=8 )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase_ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def _lowercase ( self ) -> str:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = GPTNeoXModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase_ = GPTNeoXModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(torch_device )
inputs = tokenizer('My favorite food is' , return_tensors='pt' ).to(torch_device )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=2_0 )
output_str = tokenizer.batch_decode(output_ids )[0]
self.assertEqual(output_str , expected_output )
| 318 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
"""simple docstring"""
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''
else:
prefix = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
"""simple docstring"""
ignore_keys = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key ( dct , old , new ):
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def prepare_img ( ):
"""simple docstring"""
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint ( model_name , pytorch_dump_folder_path , base_model=True ):
"""simple docstring"""
config = ViTConfig()
# patch_size: model names ending in "8" use 8x8 patches (the default is 16x16)
if model_name[-1] == "8":
config.patch_size = 8
# set labels if required
if not base_model:
config.num_labels = 1_000
repo_id = 'huggingface/label-files'
filename = 'imagenet-1k-id2label.json'
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
config.hidden_size = 384
config.intermediate_size = 1_536
config.num_hidden_layers = 12
config.num_attention_heads = 6
# load original model from torch hub
original_model = torch.hub.load('facebookresearch/dino:main' , model_name )
original_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = original_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config , base_model=base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
if base_model:
model = ViTModel(config , add_pooling_layer=False ).eval()
else:
model = ViTForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor
image_processor = ViTImageProcessor()
encoding = image_processor(images=prepare_img() , return_tensors='pt' )
pixel_values = encoding['pixel_values']
outputs = model(pixel_values )
if base_model:
final_hidden_state_cls_token = original_model(pixel_values )
assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
logits = original_model(pixel_values )
assert logits.shape == outputs.logits.shape
assert torch.allclose(logits , outputs.logits , atol=1E-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__lowerCamelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
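# Hypothetical invocation sketch (the script filename and output path below are placeholders,
# not taken from the original conversion script):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16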
| 536 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__lowerCamelCase = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
__lowerCamelCase = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
__lowerCamelCase = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union ( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
# turn into Numpy arrays
pred_label = np.array(pred_label )
label = np.array(label )
if reduce_labels:
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = label != ignore_index
mask = np.not_equal(label , ignore_index )
pred_label = pred_label[mask]
label = np.array(label )[mask]
intersect = pred_label[pred_label == label]
area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union ( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
"""simple docstring"""
total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
for result, gt_seg_map in zip(results , gt_seg_maps ):
area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou ( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
"""simple docstring"""
total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
# compute metrics
metrics = {}
all_acc = total_area_intersect.sum() / total_area_label.sum()
iou = total_area_intersect / total_area_union
acc = total_area_intersect / total_area_label
metrics['mean_iou'] = np.nanmean(iou )
metrics['mean_accuracy'] = np.nanmean(acc )
metrics['overall_accuracy'] = all_acc
metrics['per_category_iou'] = iou
metrics['per_category_accuracy'] = acc
if nan_to_num is not None:
metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
def snake_case__ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) ,reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] ,)
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = False ,) -> Tuple:
A__ = mean_iou(
results=__UpperCAmelCase ,gt_seg_maps=__UpperCAmelCase ,num_labels=__UpperCAmelCase ,ignore_index=__UpperCAmelCase ,nan_to_num=__UpperCAmelCase ,label_map=__UpperCAmelCase ,reduce_labels=__UpperCAmelCase ,)
return iou_result
| 536 | 1 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = FlaxAutoencoderKL
@property
def UpperCAmelCase__ ( self) -> List[Any]:
batch_size = 4
num_channels = 3
sizes = (3_2, 3_2)
prng_key = jax.random.PRNGKey(0)
image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def UpperCAmelCase__ ( self) -> Optional[Any]:
init_dict = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 34 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length ( fnc , x_start , x_end , steps = 100 , ):
xa = x_start
fxa = fnc(x_start )
length = 0.0
for _ in range(steps ):
# Approximates curve as a sequence of linear lines and sums their length
xb = (x_end - x_start) / steps + xa
fxb = fnc(xb )
length += math.hypot(xb - xa , fxb - fxa )
# Increment step
xa = xb
fxa = fxb
return length
if __name__ == "__main__":
def f ( x ):
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
i = 1_0
while i <= 1_0_0_0_0_0:
print(F"With {i} steps: {line_length(f, -1_0, 1_0, i)}")
i *= 1_0
| 597 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=DummyObject ):
a : Union[str, Any] = ["""torch""", """scipy"""]
def __init__( self : str , *args : Optional[int] , **kwargs : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *args : int , **kwargs : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *args : Optional[int] , **kwargs : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """scipy"""] )
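# Classes built on DummyObject, like the one above, stand in for torch/scipy-backed objects:
# importing them always succeeds, and `requires_backends` raises a helpful ImportError only
# when the class is instantiated or used without the optional backends installed.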
| 719 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( key : str ):
"""simple docstring"""
def decorator(func : Tuple ):
handle = getattr(func , """handle_key""" , [] )
handle += [key]
setattr(func , """handle_key""" , handle )
return func
return decorator
def lowerCAmelCase_ ( *keys : List[str] ):
"""simple docstring"""
def decorator(func : Tuple ):
handle = getattr(func , """handle_key""" , [] )
handle += keys
setattr(func , """handle_key""" , handle )
return func
return decorator
class KeyHandler( type ):
def __new__( cls : str , name : Optional[Any] , bases : Union[str, Any] , attrs : int ):
'''simple docstring'''
new_cls = super().__new__(cls , name , bases , attrs )
if not hasattr(new_cls , """key_handler""" ):
setattr(new_cls , """key_handler""" , {} )
setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
handled_keys = getattr(value , """handle_key""" , [] )
for key in handled_keys:
new_cls.key_handler[key] = value
return new_cls
@staticmethod
def handle_input( cls : Dict ):
'''simple docstring'''
char = get_character()
if char != KEYMAP["undefined"]:
char = ord(char )
handler = cls.key_handler.get(char )
if handler:
cls.current_selection = char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls : int ):
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
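# Hypothetical usage sketch (assuming the first decorator factory above is bound to a
# distinct name, e.g. `mark`; class and method names are placeholders):
# class Menu(metaclass=KeyHandler):
#     @mark('up')
#     def move_up(self):
#         ...
# Menu.handle_input() reads one key and dispatches to the method registered for it.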
| 442 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = FlaxAutoencoderKL
@property
def __lowercase( self : Optional[int] )-> Any:
"""simple docstring"""
batch_size = 4
num_channels = 3
sizes = (32, 32)
prng_key = jax.random.PRNGKey(0 )
image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
init_dict = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 85 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
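# `data_files` may be a single path, a list/tuple of paths (both become one "train" split),
# or a dict mapping split names to files, as handled in the method below.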
def lowerCamelCase_ ( self , dl_manager ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files ):
with open(file , "rb" ) as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
break
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , pa_table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , files ) -> Optional[Any]:
"""simple docstring"""
schema = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
with open(file , "rb" ) as f:
parquet_file = pq.ParquetFile(f )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
pa_table = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
raise
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
__snake_case : str = list[list[int]]
# assigning initial values to the grid
__snake_case : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid , row , column , n ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
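# check the 3x3 sub-grid containing (row, column)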
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location ( grid ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku ( grid ) -> Matrix | None:
if location := find_empty_location(grid ):
row , column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(grid ,row ,column ,digit ):
grid[row][column] = digit
if sudoku(grid ) is not None:
return grid
grid[row][column] = 0
return None
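# The backtracking above undoes a placement as soon as it leads to a dead end, so only
# consistent partial grids are ever explored; this is fast in practice for 9x9 puzzles.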
def print_solution ( grid ) -> None:
for row in grid:
for cell in row:
print(cell ,end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__snake_case : Optional[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 615 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = AlbertTokenizer
SCREAMING_SNAKE_CASE = AlbertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : int = "this is a test"
__lowerCAmelCase : Any = "this is a test"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
token = "<pad>"
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "▁eloquent")
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , 3_0000)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens , rust_tokens)
ids = tokenizer.encode(sequence , add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
self.assertListEqual(ids , rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids , rust_ids)
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> int:
"""simple docstring"""
tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [48, 25, 21, 1289])
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
text = tokenizer.encode("sequence builders")
text_a = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _SCREAMING_SNAKE_CASE ( self: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 615 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_a : str = logging.getLogger(__name__)
class _lowercase ( __lowercase ):
def __init__( self : int , label_idx : Optional[int]=-1 ) -> List[Any]:
# in NER datasets, the last column is usually reserved for the NER label
self.label_idx = label_idx
def a ( self : Any , data_dir : Dict , mode : Union[Split, str] ) -> List[InputExample]:
if isinstance(mode , Split ):
mode = mode.value
file_path = os.path.join(data_dir , f'{mode}.txt' )
guid_index = 1
examples = []
with open(file_path , encoding='utf-8' ) as f:
words = []
labels = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
guid_index += 1
words = []
labels = []
else:
splits = line.split(' ' )
words.append(splits[0] )
if len(SCREAMING_SNAKE_CASE_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
return examples
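# The reader above expects CoNLL-style input: one "token label..." row per line (e.g. "EU B-ORG"),
# with blank lines or -DOCSTART- markers separating sentences.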
def a ( self : Optional[Any] , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> List[Any]:
example_id = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(line )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(output_line )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def a ( self : int , path : str ) -> List[str]:
if path:
with open(path , 'r' ) as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowercase ( __lowercase ):
def __init__( self : int ) -> List[str]:
# in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def a ( self : List[Any] , path : str ) -> List[str]:
if path:
with open(path , 'r' ) as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowercase ( __lowercase ):
def a ( self : Optional[Any] , data_dir : Any , mode : Union[Split, str] ) -> List[InputExample]:
if isinstance(mode , Split ):
mode = mode.value
file_path = os.path.join(data_dir , f'{mode}.txt' )
guid_index = 1
examples = []
with open(file_path , encoding='utf-8' ) as f:
for sentence in parse_incr(f ):
words = []
labels = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(words ) == len(labels )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
guid_index += 1
return examples
def a ( self : Tuple , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> List[Any]:
example_id = 0
for sentence in parse_incr(test_input_reader ):
s_p = preds_list[example_id]
out = ''
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(out )
example_id += 1
def a ( self : Union[str, Any] , path : str ) -> List[str]:
if path:
with open(path , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 56 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
model = PriorTransformer(**model_kwargs )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
model = ShapERenderer(**model_kwargs )
return model
def a ( self : Tuple ) -> Dict:
prior = self.dummy_prior
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
renderer = self.dummy_renderer
scheduler = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
components = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , device : str , seed : Optional[int]=0 ) -> Union[str, Any]:
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images[0]
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
test_max_difference = torch_device == 'cpu'
relax_max_difference = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def a ( self : Union[str, Any] ) -> str:
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
expected_images = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=torch_device ).manual_seed(0 )
images = pipe(
'a shark' , generator=generator , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(images , expected_images )
| 56 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase : Optional[int] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase : List[Any] = """main"""
# Default branch name
lowerCAmelCase : int = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
lowerCAmelCase : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
lowerCAmelCase : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase : Union[str, Any] = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en ( ) -> Dict:
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def context_fr ( ) -> Optional[int]:
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def snake_case_ ( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''start_positions''', '''end_positions'''] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
@require_tf
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''start_positions''', '''end_positions'''] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , ['''labels'''] )
@require_flax
def snake_case_ ( self ):
"""simple docstring"""
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
class a ( __lowercase ):
pass
self.assertEqual(find_labels(_lowerCAmelCase ) , [] )
| 146 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( __lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '\n' )
def snake_case_ ( self , **_lowerCAmelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
input_text = 'tester'
output_text = 'tester'
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
special_token = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
self.assertEqual(len(encoded_special_token ) , 1 )
decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self ):
"""simple docstring"""
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
input_text , output_text = self.get_input_output_texts(tokenizer )
tokens = tokenizer.tokenize(input_text )
ids = tokenizer.convert_tokens_to_ids(tokens )
ids_a = tokenizer.encode(input_text , add_special_tokens=False )
self.assertListEqual(ids , ids_a )
tokens_a = tokenizer.convert_ids_to_tokens(ids )
self.assertNotEqual(len(tokens_a ) , 0 )
text_a = tokenizer.decode(ids )
self.assertIsInstance(text_a , str )
self.assertEqual(text_a.replace(' ' , '' ) , output_text )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
| 146 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[int] , sql : Union[str, "sqlalchemy.sql.Selectable"] , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , **kwargs : Dict , ):
'''simple docstring'''
super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
self.builder = Sql(
cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
# Build dataset for splits
dataset = self.builder.as_dataset(
split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase_ :
def __init__( self : Any , dataset : Dataset , name : str , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_sql_kwargs : Tuple , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def lowercase_ ( self : int ):
'''simple docstring'''
self.to_sql_kwargs.pop('sql' , None )
self.to_sql_kwargs.pop('con' , None )
index = self.to_sql_kwargs.pop('index' , None )
written = self._write(index=index , **self.to_sql_kwargs )
return written
def lowercase_ ( self : Dict , args : str ):
'''simple docstring'''
offset , index , to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
df = batch.to_pandas()
num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
return num_rows or len(df )
def lowercase_ ( self : Optional[Any] , index : Optional[int] , **to_sql_kwargs : Tuple ):
'''simple docstring'''
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
written += num_rows
return written
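# Hypothetical usage sketch (names are placeholders; the two classes above correspond to
# datasets' SqlDatasetReader / SqlDatasetWriter):
#   import sqlite3
#   con = sqlite3.connect('data.db')
#   writer = SqlDatasetWriter(dataset , 'my_table' , con , batch_size=1_000 )
#   written = writer.write()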
| 75 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
input_ids = tokenizer('Hello there' , return_tensors='pt').input_ids
labels = tokenizer('Hi I am' , return_tensors='pt').input_ids
loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device )).loss
# score = -(sequence length x mean per-token loss), i.e. the summed sequence log-likelihood
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.')
        kwargs['timeout'] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dirname:
        try:
            os.chdir(tmp_dirname)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=''):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f'The combined stderr from workers follows:\n{stderr}')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 700 |
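A quick usage sketch for the offline simulator defined above — a hypothetical test body, not part of the original file — shows how two of the three modes are typically driven:

# Hypothetical usage of the offline() context manager from the file above.
import pytest
import requests

def test_fetch_fails_offline():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get('https://huggingface.co')

def test_fetch_times_out_offline():
    # CONNECTION_TIMES_OUT requires an explicit timeout, otherwise the
    # patched request raises RequestWouldHangIndefinitelyError.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(requests.ConnectTimeout):
            requests.get('https://huggingface.co', timeout=1.0)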
def binary_xor(a: int, b: int) -> str:
    """
    Return the bitwise XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    >>> binary_xor(5, 5)
    '0b000'
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
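The character-wise comparison above reproduces Python's native `^` operator on non-negative integers, modulo zero-padding in the output string; a small check of my own (not from the original file) makes the relationship explicit:

# The string-walk agrees with the built-in XOR operator on these samples.
for a, b in [(25, 32), (37, 50), (0, 9)]:
    assert int(binary_xor(a, b), 2) == a ^ b
print('binary_xor matches the ^ operator on these samples')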
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f'Node({self.data})'


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, item in enumerate(self):
            if i == index:
                return item
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Assign prev to head in order to put the old tail at the front
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests that the linked list can hold any data type.
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
| 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split) | 552 | 0 |
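In practice this reader sits behind `Dataset.from_spark`. A minimal sketch, assuming a datasets version that ships `from_spark` and a local Spark session (DataFrame contents are invented):

# Hypothetical usage sketch for the Spark reader above.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master('local[*]').getOrCreate()
df = spark.createDataFrame([(1, 'a'), (2, 'b')], schema='id int, text string')

# Materializes the DataFrame through the Spark builder shown above.
ds = Dataset.from_spark(df)
print(ds[0])  # {'id': 1, 'text': 'a'}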
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.")
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = parse_args()
main(args) | 178 |
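The shard-name convention parsed by count_samples above encodes the per-shard sample count as the last dash-separated field before `.tfrecord`; a tiny sketch with a made-up filename shows the regex at work:

# Illustration of the shard-count regex used above (filename is invented).
import re

filename = 'wikitext-train-00003-1024.tfrecord'
match = re.search(r'-\d+-(\d+)\.tfrecord', filename)
print(int(match.group(1)))  # 1024 samples in this shard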
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # The fused timm qkv matrix is split into separate query/key/value projections.
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ : Optional[int] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 178 | 1 |
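The qkv split in convert_state_dict above relies on timm storing query, key and value as one fused matrix of shape (3*dim, dim); slicing it into thirds recovers the separate projections. A minimal sketch with a dummy tensor (shapes are illustrative):

# Dummy demonstration of the fused-qkv slicing used above.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
# Re-concatenating the slices reproduces the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value], dim=0), fused)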
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _A ( UpperCAmelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
A__ = InstagramUser(UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 531 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='max_length' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right')

        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 531 | 1 |
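To see what the normalization buys in the SQuAD-style F1 above, here is a small worked check of my own (strings invented for illustration):

# Toy check of normalize_answer + f1_score defined above.
pred = 'The Eiffel Tower!'
gold = 'eiffel tower'
# normalize_answer strips the article, punctuation and case, so the token
# bags match exactly and precision = recall = F1 = 1.0.
print(normalize_answer(pred))    # 'eiffel tower'
print(f1_score(pred, gold))      # 1.0
print(f1_score('eiffel', gold))  # 2*(1.0*0.5)/(1.0+0.5) ~ 0.667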
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n'
    'also takes care of multi-layer training. ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the requested layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy | 705 |
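The early-exit logic above keys off the entropy of each highway classifier's logits. A quick sketch of my own (the threshold value is invented) shows how an entropy gate decides whether to stop at an intermediate layer, in the spirit of DeeBERT:

# Toy illustration of an entropy-based exit decision.
import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * probs.log()).sum(dim=-1)

confident = torch.tensor([[8.0, 0.1, 0.1]])   # peaked -> low entropy -> exit early
uncertain = torch.tensor([[1.0, 1.0, 1.0]])   # flat -> high entropy -> keep going
threshold = 0.5  # hypothetical tuning knob
print(softmax_entropy(confident) < threshold)  # tensor([True])
print(softmax_entropy(uncertain) < threshold)  # tensor([False])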
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve an ODE dy/dx = ode_func(x, y) with Euler's modified (Heun's) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor step (plain Euler), then corrector step averaging the slopes.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 | 0 |
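A quick sanity check of the integrator above on dy/dx = y, whose exact solution is e^x (the step size is chosen arbitrarily for illustration):

# Hypothetical check: integrate dy/dx = y from x=0 to x=1 with y(0)=1.
import numpy as np

def f(x, y):
    return y

y = euler_modified(f, y0=1.0, x0=0.0, step_size=0.1, x_end=1.0)
print(y[-1])                      # ~2.7141, versus np.e ~ 2.7183
print(abs(y[-1] - np.e) < 0.01)   # True: second-order method, modest step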
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 302 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 302 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=7 , a__=6 , a__=17 , a__=23 , a__=11 , a__=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = act_dim
_UpperCAmelCase = state_dim
_UpperCAmelCase = hidden_size
_UpperCAmelCase = max_length
_UpperCAmelCase = is_training
def __A ( self ):
_UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
_UpperCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
_UpperCAmelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
_UpperCAmelCase = random_attention_mask((self.batch_size, self.seq_length) )
_UpperCAmelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __A ( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_UpperCAmelCase = DecisionTransformerModel(config=__A )
model.to(__A )
model.eval()
_UpperCAmelCase = model(__A , __A , __A , __A , __A , __A )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( snake_case , snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __A ( self ):
_UpperCAmelCase = DecisionTransformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__A , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@slow
def __A ( self ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DecisionTransformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __A ( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__A )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(__A )] , __A )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
_UpperCAmelCase = 2 # number of steps of autoregressive prediction we will perform
_UpperCAmelCase = 10 # defined by the RL environment, may be normalized
_UpperCAmelCase = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
_UpperCAmelCase = model.to(__A )
_UpperCAmelCase = model.config
torch.manual_seed(0 )
_UpperCAmelCase = torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ) # env.reset()
_UpperCAmelCase = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=__A )
_UpperCAmelCase = torch.tensor(__A , device=__A , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_UpperCAmelCase = state
_UpperCAmelCase = torch.zeros(1 , 0 , config.act_dim , device=__A , dtype=torch.floataa )
_UpperCAmelCase = torch.zeros(1 , 0 , device=__A , dtype=torch.floataa )
_UpperCAmelCase = torch.tensor(0 , device=__A , dtype=torch.long ).reshape(1 , 1 )
for step in range(__A ):
_UpperCAmelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__A )] , dim=1 )
_UpperCAmelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=__A )] , dim=1 )
_UpperCAmelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model(
states=__A , actions=__A , rewards=__A , returns_to_go=__A , timesteps=__A , attention_mask=__A , return_dict=__A , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state, reward, done, info = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ),
                1.0,
                False,
                {},
            )
_UpperCAmelCase = action_pred[0, -1]
_UpperCAmelCase = torch.cat([states, state] , dim=1 )
_UpperCAmelCase = returns_to_go[0, -1] - reward
_UpperCAmelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_UpperCAmelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=__A , dtype=torch.long ) * (step + 1)] , dim=1 )
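# Sketch (hedged): the autoregressive inference pattern exercised by the slow
# test above, condensed. `DecisionTransformerModel` and its keyword arguments
# are the transformers API used in the test; the checkpoint is the one the
# test loads.
import torch
from transformers import DecisionTransformerModel

def predict_next_action(states, actions, rewards, returns_to_go, timesteps, attention_mask):
    model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
    model.eval()
    with torch.no_grad():
        state_preds, action_preds, return_preds = model(
            states=states,
            actions=actions,
            rewards=rewards,
            returns_to_go=returns_to_go,
            timesteps=timesteps,
            attention_mask=attention_mask,
            return_dict=False,
        )
    return action_preds[0, -1]  # act on the prediction at the last position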
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def __A ( self ):
super().setUp()
def __A ( self , **a__ ):
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self , **a__ ):
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a__ )
def __A ( self ):
_UpperCAmelCase = '永和服装饰品有限公司,今天天气非常好'
_UpperCAmelCase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def __A ( self ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase , _UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
pass
def __A ( self ):
pass
def __A ( self ):
pass
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: subtract the key letter from each message letter (mod 26); spaces pass through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: add the key letter back to each cipher letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
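# Worked example (illustrative, using the reconstructed functions above): the
# first plaintext letter 'T' (19) keyed by 'S' (18) encrypts to
# (19 - 18) % 26 = 1 -> 'B', and decryption inverts it via
# (1 + 18 + 26) % 26 = 19 -> 'T'. The round trip below checks the full phrase.
def _demo_round_trip() -> None:
    key_new = generate_key("THE GERMAN ATTACK", "SECRET")
    encrypted = cipher_text("THE GERMAN ATTACK", key_new)
    assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"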
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] =["""input_features""", """attention_mask"""]
def __init__( self : List[str] , UpperCAmelCase__ : Optional[Any]=8_0 , UpperCAmelCase__ : List[str]=1_6_0_0_0 , UpperCAmelCase__ : Optional[int]=8_0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[int] , ) ->Optional[int]:
"""simple docstring"""
super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = num_mel_bins
SCREAMING_SNAKE_CASE : Dict = do_ceptral_normalize
SCREAMING_SNAKE_CASE : Optional[int] = normalize_means
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_vars
SCREAMING_SNAKE_CASE : Union[str, Any] = True
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
SCREAMING_SNAKE_CASE : Dict = torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = ta_kaldi.fbank(UpperCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowercase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : float = 0.0 , ) ->np.ndarray:
"""simple docstring"""
if normalize_means:
SCREAMING_SNAKE_CASE : Tuple = x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE : List[Any] = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ )
if normalize_vars:
SCREAMING_SNAKE_CASE : Optional[Any] = x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.divide(UpperCAmelCase__ , UpperCAmelCase__ )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE : Tuple = padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE : Optional[Any] = x.astype(np.floataa )
return x
def _lowercase ( self : List[str] , UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : Optional[np.ndarray] = None ) ->List[np.ndarray]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase__ , UpperCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )
]
def __call__( self : str , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
SCREAMING_SNAKE_CASE : Tuple = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Optional[Any] = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Dict = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Optional[Any] = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE : Tuple = [self._extract_fbank_features(UpperCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature({"""input_features""": features} )
SCREAMING_SNAKE_CASE : str = self.pad(
UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
# make sure list is in array format
SCREAMING_SNAKE_CASE : Dict = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
SCREAMING_SNAKE_CASE : int = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE : Tuple = (
np.array(UpperCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE : str = self.normalize(
padded_inputs["""input_features"""] , attention_mask=UpperCAmelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Any = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
return padded_inputs
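# For intuition (a standalone sketch, independent of the class above):
# utterance-level CMVN subtracts the per-feature mean and divides by the
# per-feature standard deviation over the valid (unpadded) frames of one
# utterance; padded frames are left at the padding value.
import numpy as np

def cmvn_sketch(features: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    valid = features[:input_length]
    normalized = (valid - valid.mean(axis=0)) / (valid.std(axis=0) + 1e-10)
    out = np.full_like(features, padding_value, dtype=np.float32)
    out[:input_length] = normalized
    return out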
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase_ = '''__DUMMY_TRANSFORMERS_USER__'''
lowercase_ = '''Dummy User'''
lowercase_ = '''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'''
lowercase_ = '''https://hub-ci.huggingface.co'''
lowercase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
lowercase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
lowercase_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''', UpperCAmelCase )
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''', UpperCAmelCase )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''', UpperCAmelCase )
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''', UpperCAmelCase )
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
HfFolder.save_token(UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def lowerCAmelCase ( ) ->Any:
"""simple docstring"""
return HfApi(endpoint=UpperCAmelCase )
@pytest.fixture(scope='''session''' )
def lowerCAmelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
__magic_name__ : str = HfFolder.get_token()
HfFolder.save_token(UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCAmelCase )
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
def _cleanup_repo(UpperCAmelCase ):
hf_api.delete_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def lowerCAmelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
@contextmanager
def _temporary_repo(UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Dict:
"""simple docstring"""
__magic_name__ : Dict = F'''repo_txt_data-{int(time.time() * 10E3 )}'''
__magic_name__ : Union[str, Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''', private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase, path_or_fileobj=str(UpperCAmelCase ), path_in_repo='''data/text_data.txt''', repo_id=UpperCAmelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Dict:
"""simple docstring"""
__magic_name__ : str = F'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
__magic_name__ : Dict = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''', private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase, path_or_fileobj=str(UpperCAmelCase ), path_in_repo='''data.zip''', repo_id=UpperCAmelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Any:
"""simple docstring"""
__magic_name__ : Dict = F'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
__magic_name__ : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''', private=UpperCAmelCase )
hf_api.upload_file(
token=UpperCAmelCase, path_or_fileobj=str(UpperCAmelCase ), path_in_repo='''data.zip''', repo_id=UpperCAmelCase, repo_type='''dataset''', )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase, token=UpperCAmelCase, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
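# Example (illustrative): a test consuming the CI-hub fixtures defined above.
# The fixture names follow the identifiers referenced in the fixture bodies
# (e.g. `hf_private_dataset_repo_txt_data` yields a private repo id); the
# test itself is hypothetical.
def test_private_repo_is_reachable(hf_api, hf_token, hf_private_dataset_repo_txt_data):
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    assert info.id == hf_private_dataset_repo_txt_data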
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __lowerCAmelCase :
'''simple docstring'''
__lowerCamelCase : int = LEDConfig
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[int] = "gelu"
def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=13 , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=False , UpperCamelCase_: Tuple=99 , UpperCamelCase_: Dict=32 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: str=37 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]=20 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=1 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: str=4 , ):
UpperCamelCase_ =parent
UpperCamelCase_ =batch_size
UpperCamelCase_ =seq_length
UpperCamelCase_ =is_training
UpperCamelCase_ =use_labels
UpperCamelCase_ =vocab_size
UpperCamelCase_ =hidden_size
UpperCamelCase_ =num_hidden_layers
UpperCamelCase_ =num_attention_heads
UpperCamelCase_ =intermediate_size
UpperCamelCase_ =hidden_dropout_prob
UpperCamelCase_ =attention_probs_dropout_prob
UpperCamelCase_ =max_position_embeddings
UpperCamelCase_ =eos_token_id
UpperCamelCase_ =pad_token_id
UpperCamelCase_ =bos_token_id
UpperCamelCase_ =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCamelCase_ =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCamelCase_ =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ =tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCamelCase_ =prepare_led_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ =tf.concat(
[tf.zeros_like(UpperCamelCase_ )[:, :-1], tf.ones_like(UpperCamelCase_ )[:, -1:]] , axis=-1 , )
UpperCamelCase_ =global_attention_mask
return config, inputs_dict
def UpperCamelCase__ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =TFLEDModel(config=UpperCamelCase_ ).get_decoder()
UpperCamelCase_ =inputs_dict["input_ids"]
UpperCamelCase_ =input_ids[:1, :]
UpperCamelCase_ =inputs_dict["attention_mask"][:1, :]
UpperCamelCase_ =1
# first forward pass
UpperCamelCase_ =model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
UpperCamelCase_ , UpperCamelCase_ =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_ =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCamelCase_ =tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase_ =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase_ =model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
UpperCamelCase_ =model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase_ =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase_ =output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase_ =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1e-3 )
def _UpperCamelCase ( A , A , A , A=None , A=None , A=None , A=None , ):
if attention_mask is None:
UpperCamelCase_ =tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase_ =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_ =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase_ =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __lowerCAmelCase ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__lowerCamelCase : Dict = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase : int = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Optional[int] = False
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =TFLEDModelTester(self )
UpperCamelCase_ =ConfigTester(self , config_class=UpperCamelCase_ )
def UpperCamelCase__ ( self: Any ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ =tf.zeros_like(inputs_dict["attention_mask"] )
UpperCamelCase_ =2
UpperCamelCase_ =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
UpperCamelCase_ =True
UpperCamelCase_ =self.model_tester.seq_length
UpperCamelCase_ =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase_: Optional[Any] ):
UpperCamelCase_ =[t.numpy() for t in outputs.encoder_attentions]
UpperCamelCase_ =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCamelCase_ =True
UpperCamelCase_ =False
UpperCamelCase_ =False
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase_ =len(UpperCamelCase_ )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
if self.is_encoder_decoder:
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_decoder_attentions_output(UpperCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase_ =True
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
# Check attention is always last and order is fine
UpperCamelCase_ =True
UpperCamelCase_ =True
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase__ ( self: Union[str, Any] ):
pass
def UpperCamelCase__ ( self: Dict ):
# TODO: Head-masking not yet implement
pass
def _UpperCamelCase ( A ):
return tf.constant(A , dtype=tf.intaa )
A_ = 1e-4
@slow
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
UpperCamelCase_ =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_ =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_ =prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ =model(**UpperCamelCase_ )[0]
UpperCamelCase_ =(1, 1024, 768)
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
UpperCamelCase_ =tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1e-3 )
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
UpperCamelCase_ =_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_ =_long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_ =prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ =model(**UpperCamelCase_ )[0]
UpperCamelCase_ =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
# change to expected output here
UpperCamelCase_ =tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1e-3 , rtol=1e-3 )
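# Sketch (hedged): building an LED-style global attention mask the way the
# test above does -- mark the first `num_global` positions of every sequence
# as globally attending (1), zeros elsewhere.
import tensorflow as tf

def make_global_attention_mask(batch_size: int, seq_len: int, num_global: int) -> tf.Tensor:
    positions = tf.tile(tf.range(seq_len)[None, :], [batch_size, 1])
    return tf.cast(positions < num_global, tf.int32)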
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about each (attribute, version_name, message) deprecation tuple,
    popping deprecated kwargs from `take_from` when provided."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
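# Example (illustrative): retiring a renamed keyword with `deprecate` above.
# `scale_image`, `old_scale` and the version string are hypothetical; the
# far-future version keeps the removal check from firing in this demo.
def scale_image(image, scale: float = 1.0, **kwargs):
    old_scale = deprecate("old_scale", "99.0.0", "Use `scale` instead.", take_from=kwargs)
    if old_scale is not None:
        scale = old_scale
    return image * scale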
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """time_series_transformer"""
_UpperCamelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 64 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , **A_ , ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = prediction_length
__lowerCAmelCase : Tuple = context_length or prediction_length
__lowerCAmelCase : str = distribution_output
__lowerCAmelCase : Any = loss
__lowerCAmelCase : List[str] = input_size
__lowerCAmelCase : Any = num_time_features
__lowerCAmelCase : Optional[int] = lags_sequence
__lowerCAmelCase : Any = scaling
__lowerCAmelCase : Dict = num_dynamic_real_features
__lowerCAmelCase : Any = num_static_real_features
__lowerCAmelCase : Optional[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowerCAmelCase : Any = cardinality
else:
__lowerCAmelCase : Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowerCAmelCase : List[Any] = embedding_dimension
else:
__lowerCAmelCase : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCAmelCase : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
__lowerCAmelCase : List[Any] = input_size * len(A_ ) + self._number_of_features
__lowerCAmelCase : int = d_model
__lowerCAmelCase : List[Any] = encoder_attention_heads
__lowerCAmelCase : int = decoder_attention_heads
__lowerCAmelCase : Tuple = encoder_ffn_dim
__lowerCAmelCase : int = decoder_ffn_dim
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : List[Any] = decoder_layers
__lowerCAmelCase : Dict = dropout
__lowerCAmelCase : int = attention_dropout
__lowerCAmelCase : Optional[int] = activation_dropout
__lowerCAmelCase : Optional[Any] = encoder_layerdrop
__lowerCAmelCase : str = decoder_layerdrop
__lowerCAmelCase : Optional[Any] = activation_function
__lowerCAmelCase : Optional[Any] = init_std
__lowerCAmelCase : int = use_cache
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
    def _number_of_features(self) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
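# Example (illustrative): constructing the config above for a small
# forecasting setup. All keyword names appear in the signature above.
from transformers import TimeSeriesTransformerConfig

example_config = TimeSeriesTransformerConfig(
    prediction_length=24,                 # forecast horizon
    context_length=48,                    # conditioning window
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],  # lagged inputs per time step
    num_time_features=2,                  # e.g. age + month-of-year
    d_model=32,
)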
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase__ ( __a ):
lowercase__ : str = (DPMSolverSDEScheduler,)
lowercase__ : Union[str, Any] = 10
def lowercase_ ( self , **UpperCamelCase__ ):
'''simple docstring'''
A__ = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_lowerCamelCase )
return config
def lowercase_ ( self ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def lowercase_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def lowercase_ ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def lowercase_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
A__ = model(_lowerCamelCase , _lowerCamelCase )
A__ = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(_lowerCamelCase ) )
A__ = torch.mean(torch.abs(_lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type="v_prediction" )
A__ = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
A__ = model(_lowerCamelCase , _lowerCamelCase )
A__ = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(_lowerCamelCase ) )
A__ = torch.mean(torch.abs(_lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(_lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
A__ = model(_lowerCamelCase , _lowerCamelCase )
A__ = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(_lowerCamelCase ) )
A__ = torch.mean(torch.abs(_lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(_lowerCamelCase ) * scheduler.init_noise_sigma
A__ = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
A__ = model(_lowerCamelCase , _lowerCamelCase )
A__ = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(_lowerCamelCase ) )
A__ = torch.mean(torch.abs(_lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
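# Sketch (hedged): the sampling loop the tests above repeat, condensed.
# Requires torchsde; `denoiser` stands in for any model with the call
# signature used in the tests.
import torch
from diffusers import DPMSolverSDEScheduler

def run_scheduler_loop(denoiser, sample: torch.Tensor, num_inference_steps: int = 10) -> torch.Tensor:
    scheduler = DPMSolverSDEScheduler(noise_sampler_seed=0)
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = denoiser(model_input, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample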
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 118 | 0 |
class DisjointSet:
    """Union-find over sets with known sizes, tracking the size of the
    largest set; uses union by rank and path compression."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point directly at the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
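# Example (illustrative, using the reconstructed class above): three sets of
# sizes 1, 2 and 3; merging the first two yields a set of size 3, so the
# largest set size stays 3.
def _demo_disjoint_set() -> None:
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 1)
    assert ds.get_parent(0) == ds.get_parent(1)
    assert ds.max_set == 3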
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a_ ( a__ ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , '''depth_multiplier''' ) )
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=0.2_5 , _lowerCamelCase=8 , _lowerCamelCase=True , _lowerCamelCase=1024 , _lowerCamelCase=32 , _lowerCamelCase="relu6" , _lowerCamelCase=0.1 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=None , ) ->List[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : Tuple = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
SCREAMING_SNAKE_CASE : Optional[Any] = min_depth
SCREAMING_SNAKE_CASE : Union[str, Any] = tf_padding
SCREAMING_SNAKE_CASE : Optional[Any] = int(last_hidden_size * depth_multiplier )
SCREAMING_SNAKE_CASE : Any = output_stride
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = num_labels
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = scope
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCAmelCase ( self ) ->Any:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : str = MobileNetVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = MobileNetVaModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = outputs.hidden_states
SCREAMING_SNAKE_CASE : Tuple = 26
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileNetVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->Any:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
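        # The Google MobileNetV1 checkpoints keep TensorFlow's 1001-way
        # classifier head (1000 ImageNet classes plus a background class),
        # hence the (1, 1001) logits shape asserted above.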
| 333 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = 32
_UpperCamelCase = 2
_UpperCamelCase = 4
_UpperCamelCase = 37
_UpperCamelCase = '''gelu'''
_UpperCamelCase = 0.1
_UpperCamelCase = 0.1
_UpperCamelCase = 5_12
_UpperCamelCase = 16
_UpperCamelCase = 2
_UpperCamelCase = 0.02
_UpperCamelCase = 3
_UpperCamelCase = 4
_UpperCamelCase = None
    def prepare_config_and_inputs(self):
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModel(config=__a)
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = True
_UpperCamelCase = TFRoFormerForCausalLM(config=__a)
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(__a)['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size])
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForMaskedLM(config=__a)
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFRoFormerForSequenceClassification(config=__a)
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFRoFormerForMultipleChoice(config=__a)
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = tf.tile(tf.expand_dims(__a , 1) , (1, self.num_choices, 1))
_UpperCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFRoFormerForTokenClassification(config=__a)
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> str:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForQuestionAnswering(config=__a)
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline-test hook (parameter names per PipelineTesterMixin);
        # skips the TF text-generation pipeline tests.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a)
@slow
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''')
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]])
_UpperCamelCase = model(__a)[0]
# TODO Replace vocab size
_UpperCamelCase = 5_00_00
_UpperCamelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , __a)
print(output[:, :3, :3])
# TODO Replace values below with what was printed above.
_UpperCamelCase = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
])
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
    tolerance = 1e-4
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = tf.constant([[4, 10]])
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6)
_UpperCamelCase = emba(input_ids.shape)
_UpperCamelCase = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
tf.debugging.assert_near(__a , __a , atol=self.tolerance)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
])
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12)
emba([2, 16, 5_12])
_UpperCamelCase = emba.weight[:3, :5]
tf.debugging.assert_near(__a , __a , atol=self.tolerance)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
    tolerance = 1e-4
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
# 2,12,16,64
_UpperCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa) , shape=(2, 12, 16, 64)) / 1_00
_UpperCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa) , shape=(2, 12, 16, 64)) / 1_00
_UpperCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64)
_UpperCamelCase = embed_positions([2, 16, 7_68])[None, None, :, :]
_UpperCamelCase , _UpperCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__a , __a , __a)
_UpperCamelCase = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
])
_UpperCamelCase = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
])
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __a , atol=self.tolerance)
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __a , atol=self.tolerance)
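        # The key input above is constructed as the exact negation of the query
        # input, and the rotary transform is linear, so the expected key slice
        # is the element-wise negation of the expected query slice.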
| 19 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    # e.g. normalize_text("Hello, WORLD!\n\nbye.") == "hello world bye"
    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
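# Example invocation (illustrative model and dataset identifiers):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs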
| 476 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
def __init__( self : List[str] , lowerCamelCase__ : Optional[Any]=64 , lowerCamelCase__ : int=4_80_00 , lowerCamelCase__ : str=4_80 , lowerCamelCase__ : str=10 , lowerCamelCase__ : List[str]=10_24 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : int=False , lowerCamelCase__ : float = 0 , lowerCamelCase__ : float = 1_40_00 , lowerCamelCase__ : int = None , lowerCamelCase__ : str = "fusion" , lowerCamelCase__ : str = "repeatpad" , **lowerCamelCase__ : Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : int = top_db
_UpperCAmelCase : Any = truncation
_UpperCAmelCase : Optional[Any] = padding
_UpperCAmelCase : int = fft_window_size
_UpperCAmelCase : Optional[int] = (fft_window_size >> 1) + 1
_UpperCAmelCase : Optional[int] = hop_length
_UpperCAmelCase : Union[str, Any] = max_length_s
_UpperCAmelCase : Union[str, Any] = max_length_s * sampling_rate
_UpperCAmelCase : List[str] = sampling_rate
_UpperCAmelCase : Optional[int] = frequency_min
_UpperCAmelCase : List[Any] = frequency_max
_UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale="htk" , )
_UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm="slaney" , mel_scale="slaney" , )
def lowerCAmelCase__ ( self : List[str] ) ->Dict[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : np.array , lowerCamelCase__ : Optional[np.array] = None ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : int = spectrogram(
lowerCamelCase__ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel="dB" , )
return log_mel_spectrogram.T
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : List[Any] = [0]
# randomly choose index for each part
_UpperCAmelCase : List[Any] = np.random.choice(ranges[0] )
_UpperCAmelCase : List[str] = np.random.choice(ranges[1] )
_UpperCAmelCase : List[str] = np.random.choice(ranges[2] )
_UpperCAmelCase : Dict = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : int = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : Dict = torch.tensor(mel[None, None, :] )
_UpperCAmelCase : Any = torch.nn.functional.interpolate(
lowerCamelCase__ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=lowerCamelCase__ )
_UpperCAmelCase : List[str] = mel_shrink[0][0].numpy()
_UpperCAmelCase : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : np.array , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str ) ->np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : Optional[int] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : Optional[Any] = len(lowerCamelCase__ ) - max_length
_UpperCAmelCase : List[Any] = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase : str = waveform[idx : idx + max_length]
_UpperCAmelCase : Dict = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
_UpperCAmelCase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : str = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase : List[str] = False
else:
_UpperCAmelCase : Optional[Any] = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCAmelCase : Union[str, Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : Optional[int] = int(max_length / len(lowerCamelCase__ ) )
_UpperCAmelCase : int = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Optional[Any] = int(max_length / len(lowerCamelCase__ ) )
_UpperCAmelCase : str = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Any = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase : List[Any] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters )
_UpperCAmelCase : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : str , lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ : str = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : str , ) ->BatchFeature:
'''simple docstring'''
_UpperCAmelCase : Any = truncation if truncation is not None else self.truncation
_UpperCAmelCase : Any = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Tuple = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : int = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Tuple = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
_UpperCAmelCase : int = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : int = [np.asarray(lowerCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : List[str] = [
self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ )
for waveform in raw_speech
]
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Tuple = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase__ )
is_longer.append(lowerCamelCase__ )
if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : List[Any] = np.random.randint(0 , len(lowerCamelCase__ ) )
_UpperCAmelCase : List[str] = True
if isinstance(input_mel[0] , lowerCamelCase__ ):
_UpperCAmelCase : Dict = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : Any = [[longer] for longer in is_longer]
_UpperCAmelCase : str = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase : Dict = BatchFeature(lowerCamelCase__ )
if return_tensors is not None:
_UpperCAmelCase : Optional[int] = input_features.convert_to_tensors(lowerCamelCase__ )
return input_features
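# Minimal usage sketch (illustrative, not from the source file): the extractor
# defaults to 48 kHz mono input and "fusion" truncation, e.g.
#     extractor = ClapFeatureExtractor()
#     feats = extractor(waveform, sampling_rate=48_000, return_tensors="pt")
# where `waveform` is a 1-D float numpy array. "input_features" then has shape
# (batch, 4, num_frames, 64) -- four mel views stacked by the fusion logic --
# and "is_longer" flags which clips exceeded max_length_s.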
| 40 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
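# All from_pretrained() calls above pass from_pt=True, i.e. the Flax classes
# convert PyTorch weights on the fly -- presumably because the
# "andreasmadsen/efficient_mlm_m0.40" checkpoint ships PyTorch weights only.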
| 40 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
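# Intended to run on a schedule (e.g. a daily CI cron) with a repository-scoped
# GITHUB_TOKEN in the environment; issues carrying any label in LABELS_TO_EXEMPT
# are never commented on or closed by the bot.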
| 635 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_UpperCamelCase : List[str] = logging.getLogger(__name__)
_UpperCamelCase : int = 'pytorch_model.bin'
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
a_ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "A csv or a json file containing the validation data."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "The name of the task to train on."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "The list of labels for the task."})
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
a_ = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."})
a_ = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
a_ = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
a_ = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
a_ = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
a_ = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Random seed for initialization."} , )
def __UpperCAmelCase ( A : str , A : Optional[Any] , A : List[Any] , A : Any , A : Union[str, Any] , A : Dict ) -> int:
UpperCAmelCase_ : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase_ : Tuple = dataset.filter(lambda A : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase_ : Dict = int(eval_result * len(A ) )
print(A )
UpperCAmelCase_ : Optional[int] = dataset.sort('''probability''' , reverse=A )
UpperCAmelCase_ : Optional[Any] = dataset.select(range(A ) )
UpperCAmelCase_ : Dict = dataset.remove_columns(['''label''', '''probability'''] )
UpperCAmelCase_ : List[Any] = dataset.rename_column('''prediction''' , '''label''' )
UpperCAmelCase_ : List[str] = dataset.map(lambda A : {"label": idalabel[example["label"]]} )
UpperCAmelCase_ : Union[str, Any] = dataset.shuffle(seed=args.seed )
UpperCAmelCase_ : str = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(A , index=A )
else:
dataset.to_json(A )
def __UpperCAmelCase ( A : Any , A : int , A : Union[str, Any] , A : Dict , **A : Any ) -> Dict:
UpperCAmelCase_ : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ : Tuple = STModelArguments(model_name_or_path=A )
UpperCAmelCase_ : int = STDataArguments(train_file=A , infer_file=A )
UpperCAmelCase_ : Optional[Any] = STTrainingArguments(output_dir=A )
UpperCAmelCase_ : Optional[int] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A ).items():
setattr(A , A , A )
for key, value in kwargs.items():
if hasattr(A , A ):
setattr(A , A , A )
# Sanity checks
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase_ : List[Any] = args.train_file
UpperCAmelCase_ : Optional[int] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase_ : List[str] = args.eval_file
for key in data_files:
UpperCAmelCase_ : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
UpperCAmelCase_ : Any = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
UpperCAmelCase_ : Any = F"{args.output_dir}/self-train_iter-{{}}".format
UpperCAmelCase_ : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A )
os.makedirs(A , exist_ok=A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : int = False
# Show the progress bar
UpperCAmelCase_ : int = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase_ : str = data_dir_format(A )
assert os.path.exists(A )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase_ : List[Any] = os.path.join(A , '''stage-1''' )
UpperCAmelCase_ : Dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A , A ):
arguments_dict.update({key: value} )
UpperCAmelCase_ : int = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , A )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase_ : Any = os.path.join(A , '''best-checkpoint''' )
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''stage-2''' )
# Update arguments_dict
UpperCAmelCase_ : List[Any] = model_path
UpperCAmelCase_ : Optional[int] = data_files['''train''']
UpperCAmelCase_ : Union[str, Any] = current_output_dir
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , A )
UpperCAmelCase_ : Dict = iteration
UpperCAmelCase_ : Optional[Any] = data_dir_format(iteration + 1 )
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(os.path.join(A , '''best-checkpoint''' ) )
UpperCAmelCase_ : Optional[Any] = config.idalabel
UpperCAmelCase_ : List[Any] = os.path.join(A , '''eval_results_best-checkpoint.json''' )
UpperCAmelCase_ : Union[str, Any] = os.path.join(A , '''test_results_best-checkpoint.json''' )
assert os.path.exists(A )
with open(A , '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = float(json.load(A )[args.eval_metric] )
UpperCAmelCase_ : List[Any] = os.path.join(A , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(A )
# Loading the dataset from local csv or json files.
UpperCAmelCase_ : str = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
UpperCAmelCase_ : Union[str, Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(A , exist_ok=A )
shutil.copy(A , os.path.join(A , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(A ):
shutil.copy(A , os.path.join(A , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(A , A , A , A , A , A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : int = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase_ : List[str] = eval_result
if best_iteration is None:
UpperCAmelCase_ : Any = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase_ : str = new_iteration
UpperCAmelCase_ : Optional[int] = new_eval_result
UpperCAmelCase_ : Any = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase_ : int = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase_ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , A )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{iteration}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
| 541 | 0 |
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
    # "Russian peasant" multiplication: add-and-double, one bit of b at a time.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # Same scheme, but the accumulator is kept reduced modulo c throughout.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
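# Quick self-check of the two helpers above (illustrative values):
if __name__ == "__main__":
    assert binary_multiply(13, 11) == 143
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7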
| 702 |
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
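# For 8-bit images like this one, Image.point() evaluates brightness() once per
# possible channel value (0-255) to build a lookup table rather than once per
# pixel, so the transform cost is independent of image size.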
| 474 | 0 |
"""simple docstring"""
from ....utils import logging
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class MMBTConfig(object):
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 528 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
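# Note: from_config() builds a *randomly initialized* model -- only the config
# and the tokenizer-derived vocab size come from pretrained artifacts, so the
# pushed checkpoint contains untrained weights meant as a training starting point.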
| 528 | 1 |
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
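# Worked example for the innermost sub-expression "(2 + 3)": RULE 1 pushes 2
# and 3, RULE 2 pushes "+"; on ")" the pops give num1 = 3, num2 = 2, and
# operators["+"](2, 3) == 5 is pushed back, exactly as RULE 4 prescribes.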
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 422 |
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
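# By linearity of expectation the value is 7 * (1 - C(60, 20) / C(70, 20)),
# which evaluates to "6.818741802" for the default draw of 20 balls.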
if __name__ == "__main__":
print(solution(20))
| 422 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast Barthez tokenizer, backed by HuggingFace tokenizers and a SentencePiece BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
def __init__( self: Any , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[Any]="<s>" , _SCREAMING_SNAKE_CASE: Any="</s>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Dict="<s>" , _SCREAMING_SNAKE_CASE: Optional[Any]="<unk>" , _SCREAMING_SNAKE_CASE: Dict="<pad>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask>" , **_SCREAMING_SNAKE_CASE: Any , ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , **_snake_case , )
__lowerCAmelCase : Optional[Any] = vocab_file
__lowerCAmelCase : List[Any] = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] = None) -> str:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase : Optional[Any] = [self.cls_token_id]
__lowerCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict = None) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
__lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: str = None) -> Optional[Any]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
__lowerCAmelCase : Optional[int] = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case):
copyfile(self.vocab_file , _snake_case)
return (out_vocab_file,) | 293 |
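For reference, the pair-sequence layout produced by build_inputs_with_special_tokens above is `<s> A </s></s> B </s>`. A small illustration with made-up token ids; the ids here are placeholders, not the real BARThez vocabulary:

token_ids_a = [10, 11]  # hypothetical ids for sentence A
token_ids_b = [20, 21]  # hypothetical ids for sentence B
cls_id, sep_id = 0, 2   # hypothetical <s> / </s> ids

single = [cls_id] + token_ids_a + [sep_id]
pair = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
print(single)  # [0, 10, 11, 2]
print(pair)    # [0, 10, 11, 2, 2, 20, 21, 2]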
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned minimum version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is registered."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the model-list links in the README at the stable docs instead of main."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 4 | 0 |
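A self-contained demonstration of how one REPLACE_PATTERNS entry rewrites a version string; the file content here is invented for illustration:

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'

code = '__version__ = "4.26.0.dev0"\n'
new_code = re_pattern.sub(template.replace("VERSION", "4.26.0"), code)
print(new_code)  # __version__ = "4.26.0"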
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs_tok = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decode_strs_tok)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 581 |
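A usage sketch of the save/load round trip these tests exercise. It assumes the public `alibaba-damo/mgp-str-base` checkpoint is reachable (network download), so treat it as illustrative rather than part of the test suite:

import tempfile

from transformers import MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)  # writes the tokenizer and image-processor configs
    reloaded = MgpstrProcessor.from_pretrained(tmp)
    assert reloaded.char_tokenizer.get_vocab() == processor.char_tokenizer.get_vocab()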
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 581 | 1 |
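The `_LazyModule` machinery above defers heavy imports until a symbol is first accessed. A simplified stand-in using PEP 562's module-level `__getattr__`, assuming it lives in a package `__init__.py`; this is a sketch of the idea, not the transformers implementation:

import importlib

_import_structure = {"modeling_transfo_xl": ["TransfoXLModel"]}


def __getattr__(name):
    # Import the submodule only when one of its symbols is actually requested.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")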
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
SCREAMING_SNAKE_CASE : str = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
SCREAMING_SNAKE_CASE : List[str] = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
| 419 |
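What the metric wraps, written out by hand as a quick cross-check against the docstring example:

import numpy as np

predictions = np.array([2.5, 0.0, 2.0, 8.0])
references = np.array([3.0, -0.5, 2.0, 7.0])
mse = float(np.mean((predictions - references) ** 2))
print(mse)        # 0.375, matching the sklearn-backed metric
print(mse ** 0.5) # ~0.612372436, i.e. the squared=False (RMSE) path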
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 419 | 1 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 701 |
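A hypothetical invocation of the converter; the script filename and all three paths below are placeholders that must point at a real LDM checkpoint and its OmegaConf config:

# From the shell (script name invented):
#   python convert_ldm.py --checkpoint_path ldm.ckpt --config_path ldm_config.yaml --output_path ./ldm-pipeline
# or, equivalently, calling the function directly:
convert_ldm_original("ldm.ckpt", "ldm_config.yaml", "./ldm-pipeline")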
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 206 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 2 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 2 | 1 |
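Usage is a single call per ticker, as the main block shows, but scraping a hard-coded Yahoo CSS class is brittle: the `.find(...)` chain raises AttributeError as soon as the page layout changes, so the selector should be treated as a snapshot rather than a stable API.

price = stock_price("AAPL")  # returns whatever price string the page currently serves
print(price)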
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 714 |
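A hedged usage sketch for the classes above: it builds a tiny randomly initialised model (all config values here are invented and far smaller than any real checkpoint) and checks that the forward pass returns a projection of the requested width.

import torch

config = RobertaSeriesConfig(
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
    max_position_embeddings=64,
    project_dim=16,
)
model = RobertaSeriesModelWithTransformation(config).eval()
input_ids = torch.randint(0, 100, (1, 8))
with torch.no_grad():
    out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
print(out.projection_state.shape)  # torch.Size([1, 8, 16])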
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RemBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
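
A minimal usage sketch of the special-token helpers above (the `google/rembert` checkpoint name comes from the map at the top of the file; the id lists are arbitrary placeholders, not real vocabulary ids):

    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    single = tokenizer.build_inputs_with_special_tokens([5, 6, 7])      # [CLS] 5 6 7 [SEP]
    pair = tokenizer.build_inputs_with_special_tokens([5, 6], [8, 9])   # [CLS] 5 6 [SEP] 8 9 [SEP]
    segments = tokenizer.create_token_type_ids_from_sequences([5, 6], [8, 9])  # [0, 0, 0, 0, 1, 1, 1]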
| 327 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parses a ProteinNet-format record into a `Protein`."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
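
A quick sanity-check sketch of the parser above, using a hand-written two-residue record (the coordinate values are made up; each [TERTIARY] axis line holds three values per residue, ordered N, CA, C):

    example = (
        "[PRIMARY]\nAG\n"
        "[TERTIARY]\n"
        "0. 100. 200. 300. 400. 500.\n"  # x coordinates: N0, CA0, C0, N1, CA1, C1
        "0. 0. 0. 0. 0. 0.\n"            # y coordinates
        "0. 0. 0. 0. 0. 0.\n"            # z coordinates
        "[MASK]\n++\n"
    )
    prot = from_proteinnet_string(example)
    print(prot.aatype)                # [0 7]  (indices of A and G in restype_order)
    print(prot.atom_positions.shape)  # (2, 37, 3)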
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Builds the REMARK/PARENT header lines for one chain of a PDB file."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Adds pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the sequence alone.

    `Protein.atom_mask` typically is defined according to the atoms that are
    reported in the PDB; this instead marks every heavy atom that should be
    present for each residue type.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
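
A minimal end-to-end sketch (synthetic arrays standing in for a real model output; shapes follow the field comments on `Protein`):

    num_res = 2
    features = {"aatype": np.zeros(num_res, dtype=np.int32), "residue_index": np.arange(num_res)}
    result = {
        "final_atom_positions": np.zeros((num_res, residue_constants.atom_type_num, 3)),
        "final_atom_mask": np.ones((num_res, residue_constants.atom_type_num)),
    }
    prot = from_prediction(features, result)
    print(to_pdb(prot).splitlines()[0])  # PARENT N/A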
| 15 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
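# For reference, a typical launch looks like the following (assuming the script
# is saved as `cross_validation.py`, as in the accelerate examples folder):
#
#     accelerate launch cross_validation.py --num_folds 3
#     accelerate launch --mixed_precision fp16 cross_validation.py --num_folds 5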
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Gets a set of train, valid, and test dataloaders for a particular fold of the dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
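
To make the fold-averaging step above concrete, here is a tiny standalone numeric sketch (hypothetical logits for three folds over two test examples with two classes):

    import torch

    fold_logits = torch.tensor(
        [
            [[0.2, 0.8], [0.6, 0.4]],  # fold 0
            [[0.1, 0.9], [0.7, 0.3]],  # fold 1
            [[0.3, 0.7], [0.2, 0.3]],  # fold 2
        ]
    )
    # Average the logits across folds, then take the argmax per example.
    ensembled = fold_logits.sum(dim=0).div(3).argmax(dim=-1)
    print(ensembled)  # tensor([1, 0])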
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 174 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
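
To illustrate how `conv_feature_layers` is unpacked above, a standalone sketch with a hypothetical fairseq layer-spec string (SEW's real spec differs):

    conv_feature_layers = "[(64, 10, 5)] + [(128, 3, 2)] * 4 + [(512, 2, 2)] * 4"
    conv_layers = eval(conv_feature_layers)
    print([x[0] for x in conv_layers])  # dims:    [64, 128, 128, 128, 128, 512, 512, 512, 512]
    print([x[1] for x in conv_layers])  # kernels: [10, 3, 3, 3, 3, 2, 2, 2, 2]
    print([x[2] for x in conv_layers])  # strides: [5, 2, 2, 2, 2, 2, 2, 2, 2]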
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak model's weights to transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase : List[str] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
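# Example invocations (the script file name and all paths are hypothetical):
#
#   python convert_sew_checkpoint.py --checkpoint_path ./sew.pt --pytorch_dump_folder_path ./sew-hf
#   python convert_sew_checkpoint.py --checkpoint_path ./sew_asr.pt --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-asr-hf --is_finetuned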
| 290 |
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz from `number` up to and including `iterations`.

    Appends "Fizz" for multiples of 3, "Buzz" for multiples of 5,
    "FizzBuzz" for multiples of both, and the number itself otherwise.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |